
Commit 9df04e1

davidel authored and torvalds committed
epoll: drop max_user_instances and rely only on max_user_watches
Linus suggested to put limits where the money is, and max_user_watches
already does that w/out the need of max_user_instances. That has the
advantage to mitigate the potential DoS while allowing pretty generous
default behavior.

Allowing top 4% of low memory (per user) to be allocated in epoll
watches, we have:

    LOMEM    MAX_WATCHES (per user)
    512MB    ~178000
    1GB      ~356000
    2GB      ~712000

A box with 512MB of lomem will meet some challenge in hitting 180K
watches, socket buffers math teaches us. No more max_user_instances
limits then.

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Cc: Willy Tarreau <w@1wt.eu>
Cc: Michael Kerrisk <mtk.manpages@googlemail.com>
Cc: Bron Gondwana <brong@fastmail.fm>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 3095eb8 commit 9df04e1
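
As a sanity check on the table above, here is a minimal userspace sketch (not part of the patch) that mirrors the sizing formula this change puts into eventpoll_init(). The EP_ITEM_COST value of 120 bytes is an assumption made for this illustration; in the kernel it is derived from sizeof(struct epitem) + sizeof(struct eppoll_entry) and varies with the architecture. Under that assumption the sketch prints values close to the ~178000 / ~356000 / ~712000 figures quoted in the commit message.

/*
 * Userspace sketch (illustration only, not kernel code): reproduce the
 * per-user watch limit computed in eventpoll_init() for a few lomem sizes.
 * EP_ITEM_COST below is an assumed value, not taken from the patch.
 */
#include <stdio.h>

#define PAGE_SHIFT   12		/* assume 4 KiB pages */
#define EP_ITEM_COST 120UL	/* assumed per-watch cost in bytes */

static unsigned long max_watches(unsigned long lomem_bytes)
{
	unsigned long lomem_pages = lomem_bytes >> PAGE_SHIFT;

	/* 1/25 of low memory (4%), back to bytes, divided by per-watch cost */
	return ((lomem_pages / 25) << PAGE_SHIFT) / EP_ITEM_COST;
}

int main(void)
{
	unsigned long mb = 1024UL * 1024UL;
	unsigned long lomem[] = { 512 * mb, 1024 * mb, 2048 * mb };

	for (int i = 0; i < 3; i++)
		printf("%4lu MB lomem -> ~%lu watches per user\n",
		       lomem[i] / mb, max_watches(lomem[i]));
	return 0;
}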

File tree

2 files changed: +4 -19 lines changed


fs/eventpoll.c (+4 -18)

@@ -234,8 +234,6 @@ struct ep_pqueue {
 /*
  * Configuration options available inside /proc/sys/fs/epoll/
  */
-/* Maximum number of epoll devices, per user */
-static int max_user_instances __read_mostly;
 /* Maximum number of epoll watched descriptors, per user */
 static int max_user_watches __read_mostly;
 
@@ -260,14 +258,6 @@ static struct kmem_cache *pwq_cache __read_mostly;
 static int zero;
 
 ctl_table epoll_table[] = {
-	{
-		.procname = "max_user_instances",
-		.data = &max_user_instances,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.extra1 = &zero,
-	},
 	{
 		.procname = "max_user_watches",
 		.data = &max_user_watches,
@@ -491,7 +481,6 @@ static void ep_free(struct eventpoll *ep)
 
 	mutex_unlock(&epmutex);
 	mutex_destroy(&ep->mtx);
-	atomic_dec(&ep->user->epoll_devs);
 	free_uid(ep->user);
 	kfree(ep);
 }
@@ -581,10 +570,6 @@ static int ep_alloc(struct eventpoll **pep)
 	struct eventpoll *ep;
 
 	user = get_current_user();
-	error = -EMFILE;
-	if (unlikely(atomic_read(&user->epoll_devs) >=
-		     max_user_instances))
-		goto free_uid;
 	error = -ENOMEM;
 	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 	if (unlikely(!ep))
@@ -1141,7 +1126,6 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
			      flags & O_CLOEXEC);
 	if (fd < 0)
 		ep_free(ep);
-	atomic_inc(&ep->user->epoll_devs);
 
 error_return:
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
@@ -1366,8 +1350,10 @@ static int __init eventpoll_init(void)
 	struct sysinfo si;
 
 	si_meminfo(&si);
-	max_user_instances = 128;
-	max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
+	/*
+	 * Allows top 4% of lomem to be allocated for epoll watches (per user).
+	 */
+	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 		EP_ITEM_COST;
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */

include/linux/sched.h (-1)

@@ -630,7 +630,6 @@ struct user_struct {
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
 	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
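
With max_user_instances gone, /proc/sys/fs/epoll/max_user_watches (the entry kept in the sysctl table above) is the only per-user epoll limit left. As a small usage sketch, independent of the patch itself, a program can read the current limit like any other procfs sysctl:

/* Minimal sketch: read the epoll watch limit this patch leaves in place. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");
	long limit;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &limit) != 1) {
		fprintf(stderr, "unexpected sysctl format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("max_user_watches = %ld\n", limit);
	return 0;
}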
