/bitcoin/depends/work/build/x86_64-pc-linux-gnu/libevent/2.1.12-stable-7656baec08e/epoll.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu> |
3 | | * Copyright 2007-2012 Niels Provos, Nick Mathewson |
4 | | * |
5 | | * Redistribution and use in source and binary forms, with or without |
6 | | * modification, are permitted provided that the following conditions |
7 | | * are met: |
8 | | * 1. Redistributions of source code must retain the above copyright |
9 | | * notice, this list of conditions and the following disclaimer. |
10 | | * 2. Redistributions in binary form must reproduce the above copyright |
11 | | * notice, this list of conditions and the following disclaimer in the |
12 | | * documentation and/or other materials provided with the distribution. |
13 | | * 3. The name of the author may not be used to endorse or promote products |
14 | | * derived from this software without specific prior written permission. |
15 | | * |
16 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | | */ |
27 | | #include "event2/event-config.h" |
28 | | #include "evconfig-private.h" |
29 | | |
30 | | #ifdef EVENT__HAVE_EPOLL |
31 | | |
32 | | #include <stdint.h> |
33 | | #include <sys/types.h> |
34 | | #include <sys/resource.h> |
35 | | #ifdef EVENT__HAVE_SYS_TIME_H |
36 | | #include <sys/time.h> |
37 | | #endif |
38 | | #include <sys/queue.h> |
39 | | #include <sys/epoll.h> |
40 | | #include <signal.h> |
41 | | #include <limits.h> |
42 | | #include <stdio.h> |
43 | | #include <stdlib.h> |
44 | | #include <string.h> |
45 | | #include <unistd.h> |
46 | | #include <errno.h> |
47 | | #ifdef EVENT__HAVE_FCNTL_H |
48 | | #include <fcntl.h> |
49 | | #endif |
50 | | #ifdef EVENT__HAVE_SYS_TIMERFD_H |
51 | | #include <sys/timerfd.h> |
52 | | #endif |
53 | | |
54 | | #include "event-internal.h" |
55 | | #include "evsignal-internal.h" |
56 | | #include "event2/thread.h" |
57 | | #include "evthread-internal.h" |
58 | | #include "log-internal.h" |
59 | | #include "evmap-internal.h" |
60 | | #include "changelist-internal.h" |
61 | | #include "time-internal.h" |
62 | | |
63 | | /* Since Linux 2.6.17, epoll is able to report about peer half-closed connection |
64 | | using special EPOLLRDHUP flag on a read event. |
65 | | */ |
66 | | #if !defined(EPOLLRDHUP) |
67 | | #define EPOLLRDHUP 0 |
68 | | #define EARLY_CLOSE_IF_HAVE_RDHUP 0 |
69 | | #else |
70 | | #define EARLY_CLOSE_IF_HAVE_RDHUP EV_FEATURE_EARLY_CLOSE |
71 | | #endif |
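
As context for the EPOLLRDHUP fallback above: on kernels that support it, the flag lets a reader distinguish a peer half-close from ordinary readability. A minimal standalone sketch (illustrative only, not part of epoll.c) of watching a socket for both:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: register `sock` for data and peer half-close, wait once, report. */
static int watch_for_rdhup(int sock)
{
	struct epoll_event ev, out;
	int epfd = epoll_create1(EPOLL_CLOEXEC);

	if (epfd < 0)
		return -1;
	ev.events = EPOLLIN | EPOLLRDHUP;  /* EPOLLRDHUP: peer did shutdown(SHUT_WR) or close() */
	ev.data.fd = sock;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		close(epfd);
		return -1;
	}
	if (epoll_wait(epfd, &out, 1, -1) == 1 && (out.events & EPOLLRDHUP))
		printf("peer half-closed fd %d\n", out.data.fd);
	close(epfd);
	return 0;
}
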
72 | | |
73 | | #include "epolltable-internal.h" |
74 | | |
75 | | #if defined(EVENT__HAVE_SYS_TIMERFD_H) && \ |
76 | | defined(EVENT__HAVE_TIMERFD_CREATE) && \ |
77 | | defined(HAVE_POSIX_MONOTONIC) && defined(TFD_NONBLOCK) && \ |
78 | | defined(TFD_CLOEXEC) |
79 | | /* Note that we only use timerfd if TFD_NONBLOCK and TFD_CLOEXEC are available |
80 | | and working. This means that we can't support it on 2.6.25 (where timerfd |
81 | | was introduced) or 2.6.26, since 2.6.27 introduced those flags. |
82 | | */ |
83 | | #define USING_TIMERFD |
84 | | #endif |
85 | | |
86 | | struct epollop { |
87 | | struct epoll_event *events; |
88 | | int nevents; |
89 | | int epfd; |
90 | | #ifdef USING_TIMERFD |
91 | | int timerfd; |
92 | | #endif |
93 | | }; |
94 | | |
95 | | static void *epoll_init(struct event_base *); |
96 | | static int epoll_dispatch(struct event_base *, struct timeval *); |
97 | | static void epoll_dealloc(struct event_base *); |
98 | | |
99 | | static const struct eventop epollops_changelist = { |
100 | | "epoll (with changelist)", |
101 | | epoll_init, |
102 | | event_changelist_add_, |
103 | | event_changelist_del_, |
104 | | epoll_dispatch, |
105 | | epoll_dealloc, |
106 | | 1, /* need reinit */ |
107 | | EV_FEATURE_ET|EV_FEATURE_O1| EARLY_CLOSE_IF_HAVE_RDHUP, |
108 | | EVENT_CHANGELIST_FDINFO_SIZE |
109 | | }; |
110 | | |
111 | | |
112 | | static int epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd, |
113 | | short old, short events, void *p); |
114 | | static int epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd, |
115 | | short old, short events, void *p); |
116 | | |
117 | | const struct eventop epollops = { |
118 | | "epoll", |
119 | | epoll_init, |
120 | | epoll_nochangelist_add, |
121 | | epoll_nochangelist_del, |
122 | | epoll_dispatch, |
123 | | epoll_dealloc, |
124 | | 1, /* need reinit */ |
125 | | EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_EARLY_CLOSE, |
126 | | 0 |
127 | | }; |
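
The two eventop tables above are the entry points the rest of libevent calls into. A short sketch of public-API usage that would exercise them on Linux (illustrative; the callback and socket are placeholders):

#include <event2/event.h>

static void on_read(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;   /* placeholder callback */
}

static void run_loop(evutil_socket_t sock)
{
	struct event_base *base = event_base_new();  /* selects "epoll" on Linux */
	struct event *ev = event_new(base, sock, EV_READ | EV_PERSIST, on_read, NULL);

	event_add(ev, NULL);           /* reaches epoll_nochangelist_add() */
	event_base_dispatch(base);     /* loops in epoll_dispatch() */
	event_free(ev);
	event_base_free(base);
}
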
128 | | |
129 | 11.0k | #define INITIAL_NEVENT 32 |
130 | 0 | #define MAX_NEVENT 4096 |
131 | | |
132 | | /* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout |
133 | | * values bigger than (LONG_MAX - 999ULL)/HZ. HZ in the wild can be |
134 | | * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the |
135 | | * largest number of msec we can support here is 2147482. Let's |
136 | | * round that down by 47 seconds. |
137 | | */ |
138 | 7.06M | #define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000) |
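
Checking the arithmetic behind that cap:

    (LONG_MAX - 999) / HZ  = (2147483647 - 999) / 1000 = 2147482 ms  (about 35.8 minutes)
    MAX_EPOLL_TIMEOUT_MSEC = 35 * 60 * 1000            = 2100000 ms  (exactly 35 minutes)
    margin                 = 2147482 - 2100000         =   47482 ms  (the ~47 seconds of headroom)
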
139 | | |
140 | | static void * |
141 | | epoll_init(struct event_base *base) |
142 | 11.0k | { |
143 | 11.0k | int epfd = -1; |
144 | 11.0k | struct epollop *epollop; |
145 | | |
146 | 11.0k | #ifdef EVENT__HAVE_EPOLL_CREATE1 |
147 | | /* First, try the shiny new epoll_create1 interface, if we have it. */ |
148 | 11.0k | epfd = epoll_create1(EPOLL_CLOEXEC); |
149 | 11.0k | #endif |
150 | 11.0k | if (epfd == -1) { Branch (150:6): [True: 0, False: 11.0k]
151 | | /* Initialize the kernel queue using the old interface. (The |
152 | | size field is ignored since 2.6.8.) */ |
153 | 0 | if ((epfd = epoll_create(32000)) == -1) { Branch (153:7): [True: 0, False: 0]
154 | 0 | if (errno != ENOSYS) Branch (154:8): [True: 0, False: 0]
155 | 0 | event_warn("epoll_create"); |
156 | 0 | return (NULL); |
157 | 0 | } |
158 | 0 | evutil_make_socket_closeonexec(epfd); |
159 | 0 | } |
160 | | |
161 | 11.0k | if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) { Branch (161:6): [True: 0, False: 11.0k]
162 | 0 | close(epfd); |
163 | 0 | return (NULL); |
164 | 0 | } |
165 | | |
166 | 11.0k | epollop->epfd = epfd; |
167 | | |
168 | | /* Initialize fields */ |
169 | 11.0k | epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event)); |
170 | 11.0k | if (epollop->events == NULL) { Branch (170:6): [True: 0, False: 11.0k]
171 | 0 | mm_free(epollop); |
172 | 0 | close(epfd); |
173 | 0 | return (NULL); |
174 | 0 | } |
175 | 11.0k | epollop->nevents = INITIAL_NEVENT; |
176 | | |
177 | 11.0k | if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 || Branch (177:6): [True: 0, False: 11.0k]
178 | 11.0k | ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 && Branch (178:7): [True: 11.0k, False: 0]
179 | 11.0k | evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) { Branch (179:3): [True: 0, False: 11.0k]
180 | |
181 | 0 | base->evsel = &epollops_changelist; |
182 | 0 | } |
183 | | |
184 | 11.0k | #ifdef USING_TIMERFD |
185 | | /* |
186 | | The epoll interface ordinarily gives us one-millisecond precision, |
187 | | so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE |
188 | | timer. But when the user has set the new PRECISE_TIMER flag for an |
189 | | event_base, we can try to use timerfd to give them finer granularity. |
190 | | */ |
191 | 11.0k | if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) && Branch (191:6): [True: 0, False: 11.0k]
192 | 11.0k | base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) { Branch (192:6): [True: 0, False: 0]
193 | 0 | int fd; |
194 | 0 | fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC); |
195 | 0 | if (epollop->timerfd >= 0) { Branch (195:7): [True: 0, False: 0]
196 | 0 | struct epoll_event epev; |
197 | 0 | memset(&epev, 0, sizeof(epev)); |
198 | 0 | epev.data.fd = epollop->timerfd; |
199 | 0 | epev.events = EPOLLIN; |
200 | 0 | if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) { Branch (200:8): [True: 0, False: 0]
201 | 0 | event_warn("epoll_ctl(timerfd)"); |
202 | 0 | close(fd); |
203 | 0 | epollop->timerfd = -1; |
204 | 0 | } |
205 | 0 | } else { |
206 | 0 | if (errno != EINVAL && errno != ENOSYS) { Branch (206:8): [True: 0, False: 0]
Branch (206:27): [True: 0, False: 0]
207 | | /* These errors probably mean that we were |
208 | | * compiled with timerfd/TFD_* support, but |
209 | | * we're running on a kernel that lacks those. |
210 | | */ |
211 | 0 | event_warn("timerfd_create"); |
212 | 0 | } |
213 | 0 | epollop->timerfd = -1; |
214 | 0 | } |
215 | 11.0k | } else { |
216 | 11.0k | epollop->timerfd = -1; |
217 | 11.0k | } |
218 | 11.0k | #endif |
219 | | |
220 | 11.0k | evsig_init_(base); |
221 | | |
222 | 11.0k | return (epollop); |
223 | 11.0k | } |
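
For the EVENT_BASE_FLAG_PRECISE_TIMER branch above: instead of relying on epoll_wait()'s millisecond timeout, the backend arms a timerfd and waits on it like any other descriptor. A standalone sketch of that pattern (illustrative only, error handling omitted):

#include <sys/timerfd.h>
#include <sys/epoll.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static void timerfd_pattern(void)
{
	int epfd = epoll_create1(EPOLL_CLOEXEC);
	int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = tfd };
	struct itimerspec its = { .it_value = { .tv_sec = 0, .tv_nsec = 250 * 1000 * 1000 } };
	uint64_t expirations;

	epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &ev);
	timerfd_settime(tfd, 0, &its, NULL);             /* one-shot timer, ~250 ms out */
	epoll_wait(epfd, &ev, 1, -1);                    /* wakes when the timer fires */
	read(tfd, &expirations, sizeof(expirations));    /* expiration count, per timerfd(2) */
	close(tfd);
	close(epfd);
}
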
224 | | |
225 | | static const char * |
226 | | change_to_string(int change) |
227 | 0 | { |
228 | 0 | change &= (EV_CHANGE_ADD|EV_CHANGE_DEL); |
229 | 0 | if (change == EV_CHANGE_ADD) { Branch (229:6): [True: 0, False: 0]
230 | 0 | return "add"; |
231 | 0 | } else if (change == EV_CHANGE_DEL) { Branch (231:13): [True: 0, False: 0]
232 | 0 | return "del"; |
233 | 0 | } else if (change == 0) { Branch (233:13): [True: 0, False: 0]
234 | 0 | return "none"; |
235 | 0 | } else { |
236 | 0 | return "???"; |
237 | 0 | } |
238 | 0 | } |
239 | | |
240 | | static const char * |
241 | | epoll_op_to_string(int op) |
242 | 0 | { |
243 | 0 | return op == EPOLL_CTL_ADD?"ADD": Branch (243:9): [True: 0, False: 0]
244 | 0 | op == EPOLL_CTL_DEL?"DEL": Branch (244:6): [True: 0, False: 0]
245 | 0 | op == EPOLL_CTL_MOD?"MOD": Branch (245:6): [True: 0, False: 0]
246 | 0 | "???"; |
247 | 0 | } |
248 | | |
249 | | #define PRINT_CHANGES(op, events, ch, status) \ |
250 | 0 | "Epoll %s(%d) on fd %d " status ". " \ |
251 | 0 | "Old events were %d; " \ |
252 | 0 | "read change was %d (%s); " \ |
253 | 0 | "write change was %d (%s); " \ |
254 | 0 | "close change was %d (%s)", \ |
255 | 0 | epoll_op_to_string(op), \ |
256 | 0 | events, \ |
257 | 0 | ch->fd, \ |
258 | 0 | ch->old_events, \ |
259 | 0 | ch->read_change, \ |
260 | 0 | change_to_string(ch->read_change), \ |
261 | 0 | ch->write_change, \ |
262 | 0 | change_to_string(ch->write_change), \ |
263 | 0 | ch->close_change, \ |
264 | 0 | change_to_string(ch->close_change) |
265 | | |
266 | | static int |
267 | | epoll_apply_one_change(struct event_base *base, |
268 | | struct epollop *epollop, |
269 | | const struct event_change *ch) |
270 | 18.9M | { |
271 | 18.9M | struct epoll_event epev; |
272 | 18.9M | int op, events = 0; |
273 | 18.9M | int idx; |
274 | | |
275 | 18.9M | idx = EPOLL_OP_TABLE_INDEX(ch); |
276 | 18.9M | op = epoll_op_table[idx].op; |
277 | 18.9M | events = epoll_op_table[idx].events; |
278 | | |
279 | 18.9M | if (!events) { Branch (279:6): [True: 0, False: 18.9M]
280 | 0 | EVUTIL_ASSERT(op == 0); |
281 | 0 | return 0; |
282 | 0 | } |
283 | | |
284 | 18.9M | if ((ch->read_change|ch->write_change|ch->close_change) & EV_CHANGE_ET) Branch (284:6): [True: 0, False: 18.9M]
285 | 0 | events |= EPOLLET; |
286 | | |
287 | 18.9M | memset(&epev, 0, sizeof(epev)); |
288 | 18.9M | epev.data.fd = ch->fd; |
289 | 18.9M | epev.events = events; |
290 | 18.9M | if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == 0) { Branch (290:6): [True: 18.9M, False: 11.0k]
291 | 18.9M | event_debug((PRINT_CHANGES(op, epev.events, ch, "okay"))); |
292 | 18.9M | return 0; |
293 | 18.9M | } |
294 | | |
295 | 11.0k | switch (op) { |
296 | 0 | case EPOLL_CTL_MOD: Branch (296:2): [True: 0, False: 11.0k]
297 | 0 | if (errno == ENOENT) { Branch (297:7): [True: 0, False: 0]
298 | | /* If a MOD operation fails with ENOENT, the |
299 | | * fd was probably closed and re-opened. We |
300 | | * should retry the operation as an ADD. |
301 | | */ |
302 | 0 | if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) { Branch (302:8): [True: 0, False: 0]
303 | 0 | event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too", |
304 | 0 | (int)epev.events, ch->fd); |
305 | 0 | return -1; |
306 | 0 | } else { |
307 | 0 | event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.", |
308 | 0 | (int)epev.events, |
309 | 0 | ch->fd)); |
310 | 0 | return 0; |
311 | 0 | } |
312 | 0 | } |
313 | 0 | break; |
314 | 0 | case EPOLL_CTL_ADD: Branch (314:2): [True: 0, False: 11.0k]
315 | 0 | if (errno == EEXIST) { Branch (315:7): [True: 0, False: 0]
316 | | /* If an ADD operation fails with EEXIST, |
317 | | * either the operation was redundant (as with a |
318 | | * precautionary add), or we ran into a fun |
319 | | * kernel bug where using dup*() to duplicate the |
320 | | * same file into the same fd gives you the same epitem |
321 | | * rather than a fresh one. For the second case, |
322 | | * we must retry with MOD. */ |
323 | 0 | if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD, ch->fd, &epev) == -1) { Branch (323:8): [True: 0, False: 0]
324 | 0 | event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too", |
325 | 0 | (int)epev.events, ch->fd); |
326 | 0 | return -1; |
327 | 0 | } else { |
328 | 0 | event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.", |
329 | 0 | (int)epev.events, |
330 | 0 | ch->fd)); |
331 | 0 | return 0; |
332 | 0 | } |
333 | 0 | } |
334 | 0 | break; |
335 | 11.0k | case EPOLL_CTL_DEL: Branch (335:2): [True: 11.0k, False: 0]
336 | 11.0k | if (errno == ENOENT || errno == EBADF || errno == EPERM) { Branch (336:7): [True: 0, False: 11.0k]
Branch (336:26): [True: 11.0k, False: 0]
Branch (336:44): [True: 0, False: 0]
337 | | /* If a delete fails with one of these errors, |
338 | | * that's fine too: we closed the fd before we |
339 | | * got around to calling epoll_dispatch. */ |
340 | 11.0k | event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.", |
341 | 11.0k | (int)epev.events, |
342 | 11.0k | ch->fd, |
343 | 11.0k | strerror(errno))); |
344 | 11.0k | return 0; |
345 | 11.0k | } |
346 | 0 | break; |
347 | 0 | default: Branch (347:2): [True: 0, False: 11.0k]
348 | 0 | break; |
349 | 11.0k | } |
350 | | |
351 | 0 | event_warn(PRINT_CHANGES(op, epev.events, ch, "failed")); |
352 | 0 | return -1; |
353 | 11.0k | } |
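
The ENOENT/EEXIST retries above amount to treating epoll_ctl() registration as an "upsert". A hypothetical standalone helper (not from libevent) showing the same fallback for the ADD case:

#include <sys/epoll.h>
#include <stdint.h>
#include <errno.h>

/* Sketch: try ADD first; if the fd is already registered (EEXIST), fall
 * back to MOD, mirroring the recovery path in epoll_apply_one_change(). */
static int epoll_upsert(int epfd, int fd, uint32_t events)
{
	struct epoll_event ev = { .events = events, .data.fd = fd };

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == 0)
		return 0;
	if (errno == EEXIST)
		return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
	return -1;
}
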
354 | | |
355 | | static int |
356 | | epoll_apply_changes(struct event_base *base) |
357 | 11.7M | { |
358 | 11.7M | struct event_changelist *changelist = &base->changelist; |
359 | 11.7M | struct epollop *epollop = base->evbase; |
360 | 11.7M | struct event_change *ch; |
361 | | |
362 | 11.7M | int r = 0; |
363 | 11.7M | int i; |
364 | | |
365 | 11.7M | for (i = 0; i < changelist->n_changes; ++i) { Branch (365:14): [True: 0, False: 11.7M]
366 | 0 | ch = &changelist->changes[i]; |
367 | 0 | if (epoll_apply_one_change(base, epollop, ch) < 0) Branch (367:7): [True: 0, False: 0]
368 | 0 | r = -1; |
369 | 0 | } |
370 | | |
371 | 11.7M | return (r); |
372 | 11.7M | } |
373 | | |
374 | | static int |
375 | | epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd, |
376 | | short old, short events, void *p) |
377 | 9.45M | { |
378 | 9.45M | struct event_change ch; |
379 | 9.45M | ch.fd = fd; |
380 | 9.45M | ch.old_events = old; |
381 | 9.45M | ch.read_change = ch.write_change = ch.close_change = 0; |
382 | 9.45M | if (events & EV_WRITE) Branch (382:6): [True: 4.71M, False: 4.73M]
383 | 4.71M | ch.write_change = EV_CHANGE_ADD | |
384 | 4.71M | (events & EV_ET); |
385 | 9.45M | if (events & EV_READ) Branch (385:6): [True: 4.73M, False: 4.71M]
386 | 4.73M | ch.read_change = EV_CHANGE_ADD | |
387 | 4.73M | (events & EV_ET); |
388 | 9.45M | if (events & EV_CLOSED) Branch (388:6): [True: 0, False: 9.45M]
389 | 0 | ch.close_change = EV_CHANGE_ADD | |
390 | 0 | (events & EV_ET); |
391 | | |
392 | 9.45M | return epoll_apply_one_change(base, base->evbase, &ch); |
393 | 9.45M | } |
394 | | |
395 | | static int |
396 | | epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd, |
397 | | short old, short events, void *p) |
398 | 9.45M | { |
399 | 9.45M | struct event_change ch; |
400 | 9.45M | ch.fd = fd; |
401 | 9.45M | ch.old_events = old; |
402 | 9.45M | ch.read_change = ch.write_change = ch.close_change = 0; |
403 | 9.45M | if (events & EV_WRITE) Branch (403:6): [True: 4.71M, False: 4.73M]
404 | 4.71M | ch.write_change = EV_CHANGE_DEL | |
405 | 4.71M | (events & EV_ET); |
406 | 9.45M | if (events & EV_READ) Branch (406:6): [True: 4.73M, False: 4.71M]
407 | 4.73M | ch.read_change = EV_CHANGE_DEL | |
408 | 4.73M | (events & EV_ET); |
409 | 9.45M | if (events & EV_CLOSED) Branch (409:6): [True: 0, False: 9.45M]
410 | 0 | ch.close_change = EV_CHANGE_DEL | |
411 | 0 | (events & EV_ET); |
412 | | |
413 | 9.45M | return epoll_apply_one_change(base, base->evbase, &ch); |
414 | 9.45M | } |
415 | | |
416 | | static int |
417 | | epoll_dispatch(struct event_base *base, struct timeval *tv) |
418 | 11.7M | { |
419 | 11.7M | struct epollop *epollop = base->evbase; |
420 | 11.7M | struct epoll_event *events = epollop->events; |
421 | 11.7M | int i, res; |
422 | 11.7M | long timeout = -1; |
423 | | |
424 | 11.7M | #ifdef USING_TIMERFD |
425 | 11.7M | if (epollop->timerfd >= 0) { Branch (425:6): [True: 0, False: 11.7M]
426 | 0 | struct itimerspec is; |
427 | 0 | is.it_interval.tv_sec = 0; |
428 | 0 | is.it_interval.tv_nsec = 0; |
429 | 0 | if (tv == NULL) { Branch (429:7): [True: 0, False: 0]
430 | | /* No timeout; disarm the timer. */ |
431 | 0 | is.it_value.tv_sec = 0; |
432 | 0 | is.it_value.tv_nsec = 0; |
433 | 0 | } else { |
434 | 0 | if (tv->tv_sec == 0 && tv->tv_usec == 0) { Branch (434:8): [True: 0, False: 0]
Branch (434:27): [True: 0, False: 0]
435 | | /* we need to exit immediately; timerfd can't |
436 | | * do that. */ |
437 | 0 | timeout = 0; |
438 | 0 | } |
439 | 0 | is.it_value.tv_sec = tv->tv_sec; |
440 | 0 | is.it_value.tv_nsec = tv->tv_usec * 1000; |
441 | 0 | } |
442 | | /* TODO: we could avoid unnecessary syscalls here by only |
443 | | calling timerfd_settime when the top timeout changes, or |
444 | | when we're called with a different timeval. |
445 | | */ |
446 | 0 | if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) { Branch (446:7): [True: 0, False: 0]
447 | 0 | event_warn("timerfd_settime"); |
448 | 0 | } |
449 | 0 | } else |
450 | 11.7M | #endif |
451 | 11.7M | if (tv != NULL) { Branch (451:6): [True: 7.06M, False: 4.72M]
|
452 | 7.06M | timeout = evutil_tv_to_msec_(tv); |
453 | 7.06M | if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) { Branch (453:7): [True: 0, False: 7.06M]
Branch (453:22): [True: 0, False: 7.06M]
454 | | /* Linux kernels can wait forever if the timeout is |
455 | | * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */ |
456 | 0 | timeout = MAX_EPOLL_TIMEOUT_MSEC; |
457 | 0 | } |
458 | 7.06M | } |
459 | | |
460 | 11.7M | epoll_apply_changes(base); |
461 | 11.7M | event_changelist_remove_all_(&base->changelist, base); |
462 | | |
463 | 11.7M | EVBASE_RELEASE_LOCK(base, th_base_lock); |
464 | | |
465 | 11.7M | res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout); |
466 | | |
467 | 11.7M | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
468 | | |
469 | 11.7M | if (res == -1) { Branch (469:6): [True: 0, False: 11.7M]
470 | 0 | if (errno != EINTR) { Branch (470:7): [True: 0, False: 0]
471 | 0 | event_warn("epoll_wait"); |
472 | 0 | return (-1); |
473 | 0 | } |
474 | | |
475 | 0 | return (0); |
476 | 0 | } |
477 | | |
478 | 11.7M | event_debug(("%s: epoll_wait reports %d", __func__, res)); |
479 | 11.7M | EVUTIL_ASSERT(res <= epollop->nevents); |
480 | | |
481 | 23.5M | for (i = 0; i < res; i++) { Branch (481:14): [True: 11.7M, False: 11.7M]
482 | 11.7M | int what = events[i].events; |
483 | 11.7M | short ev = 0; |
484 | 11.7M | #ifdef USING_TIMERFD |
485 | 11.7M | if (events[i].data.fd == epollop->timerfd) Branch (485:7): [True: 0, False: 11.7M]
486 | 0 | continue; |
487 | 11.7M | #endif |
488 | | |
489 | 11.7M | if (what & EPOLLERR) { Branch (489:7): [True: 0, False: 11.7M]
490 | 0 | ev = EV_READ | EV_WRITE; |
491 | 11.7M | } else if ((what & EPOLLHUP) && !(what & EPOLLRDHUP)) { Branch (491:14): [True: 0, False: 11.7M]
Branch (491:35): [True: 0, False: 0]
492 | 0 | ev = EV_READ | EV_WRITE; |
493 | 11.7M | } else { |
494 | 11.7M | if (what & EPOLLIN) Branch (494:8): [True: 9.43M, False: 2.35M]
495 | 9.43M | ev |= EV_READ; |
496 | 11.7M | if (what & EPOLLOUT) Branch (496:8): [True: 2.35M, False: 9.43M]
497 | 2.35M | ev |= EV_WRITE; |
498 | 11.7M | if (what & EPOLLRDHUP) Branch (498:8): [True: 0, False: 11.7M]
499 | 0 | ev |= EV_CLOSED; |
500 | 11.7M | } |
501 | | |
502 | 11.7M | if (!ev) Branch (502:7): [True: 0, False: 11.7M]
503 | 0 | continue; |
504 | | |
505 | 11.7M | evmap_io_active_(base, events[i].data.fd, ev | EV_ET); |
506 | 11.7M | } |
507 | | |
508 | 11.7M | if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) { Branch (508:6): [True: 0, False: 11.7M]
Branch (508:33): [True: 0, False: 0]
509 | | /* We used all of the event space this time. We should |
510 | | be ready for more events next time. */ |
511 | 0 | int new_nevents = epollop->nevents * 2; |
512 | 0 | struct epoll_event *new_events; |
513 | |
514 | 0 | new_events = mm_realloc(epollop->events, |
515 | 0 | new_nevents * sizeof(struct epoll_event)); |
516 | 0 | if (new_events) { Branch (516:7): [True: 0, False: 0]
517 | 0 | epollop->events = new_events; |
518 | 0 | epollop->nevents = new_nevents; |
519 | 0 | } |
520 | 0 | } |
521 | | |
522 | 11.7M | return (0); |
523 | 11.7M | } |
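
Summarizing the translation the loop above applies to each ready descriptor before calling evmap_io_active_() (EV_ET is OR'd in unconditionally):

    epoll result                      libevent events
    EPOLLERR                          EV_READ | EV_WRITE
    EPOLLHUP without EPOLLRDHUP       EV_READ | EV_WRITE
    EPOLLIN                           EV_READ
    EPOLLOUT                          EV_WRITE
    EPOLLRDHUP                        EV_CLOSED
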
524 | | |
525 | | |
526 | | static void |
527 | | epoll_dealloc(struct event_base *base) |
528 | 11.0k | { |
529 | 11.0k | struct epollop *epollop = base->evbase; |
530 | | |
531 | 11.0k | evsig_dealloc_(base); |
532 | 11.0k | if (epollop->events) Branch (532:6): [True: 11.0k, False: 0]
533 | 11.0k | mm_free(epollop->events); |
534 | 11.0k | if (epollop->epfd >= 0) Branch (534:6): [True: 11.0k, False: 0]
535 | 11.0k | close(epollop->epfd); |
536 | 11.0k | #ifdef USING_TIMERFD |
537 | 11.0k | if (epollop->timerfd >= 0) Branch (537:6): [True: 0, False: 11.0k]
538 | 0 | close(epollop->timerfd); |
539 | 11.0k | #endif |
540 | | |
541 | 11.0k | memset(epollop, 0, sizeof(struct epollop)); |
542 | 11.0k | mm_free(epollop); |
543 | 11.0k | } |
544 | | |
545 | | #endif /* EVENT__HAVE_EPOLL */ |