/bitcoin/depends/work/build/x86_64-pc-linux-gnu/libevent/2.1.12-stable-7656baec08e/evmap.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson |
3 | | * |
4 | | * Redistribution and use in source and binary forms, with or without |
5 | | * modification, are permitted provided that the following conditions |
6 | | * are met: |
7 | | * 1. Redistributions of source code must retain the above copyright |
8 | | * notice, this list of conditions and the following disclaimer. |
9 | | * 2. Redistributions in binary form must reproduce the above copyright |
10 | | * notice, this list of conditions and the following disclaimer in the |
11 | | * documentation and/or other materials provided with the distribution. |
12 | | * 3. The name of the author may not be used to endorse or promote products |
13 | | * derived from this software without specific prior written permission. |
14 | | * |
15 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | | */ |
26 | | #include "event2/event-config.h" |
27 | | #include "evconfig-private.h" |
28 | | |
29 | | #ifdef _WIN32 |
30 | | #include <winsock2.h> |
31 | | #define WIN32_LEAN_AND_MEAN |
32 | | #include <windows.h> |
33 | | #undef WIN32_LEAN_AND_MEAN |
34 | | #endif |
35 | | #include <sys/types.h> |
36 | | #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H) |
37 | | #include <sys/time.h> |
38 | | #endif |
39 | | #include <sys/queue.h> |
40 | | #include <stdio.h> |
41 | | #include <stdlib.h> |
42 | | #ifndef _WIN32 |
43 | | #include <unistd.h> |
44 | | #endif |
45 | | #include <errno.h> |
46 | | #include <limits.h> |
47 | | #include <signal.h> |
48 | | #include <string.h> |
49 | | #include <time.h> |
50 | | |
51 | | #include "event-internal.h" |
52 | | #include "evmap-internal.h" |
53 | | #include "mm-internal.h" |
54 | | #include "changelist-internal.h" |
55 | | |
56 | | /** An entry for an evmap_io list: notes all the events that want to read, |
57 | | write, or detect close on a given fd, and the number of each. |
58 | | */ |
59 | | struct evmap_io { |
60 | | struct event_dlist events; |
61 | | ev_uint16_t nread; |
62 | | ev_uint16_t nwrite; |
63 | | ev_uint16_t nclose; |
64 | | }; |
65 | | |
66 | | /* An entry for an evmap_signal list: notes all the events that want to know |
67 | | when a signal triggers. */ |
68 | | struct evmap_signal { |
69 | | struct event_dlist events; |
70 | | }; |
71 | | |
72 | | /* On some platforms, fds start at 0 and increment by 1 as they are |
73 | | allocated, and old numbers get reused. For these platforms, we |
74 | | implement io maps just like signal maps: as an array of pointers to |
75 | | struct evmap_io. But on other platforms (Windows), sockets are not |
76 | | 0-indexed, not necessarily consecutive, and not necessarily reused. |
77 | | There, we use a hashtable to implement evmap_io. |
78 | | */ |
79 | | #ifdef EVMAP_USE_HT |
80 | | struct event_map_entry { |
81 | | HT_ENTRY(event_map_entry) map_node; |
82 | | evutil_socket_t fd; |
83 | | union { /* This is a union in case we need to make more things that can |
84 | | be in the hashtable. */ |
85 | | struct evmap_io evmap_io; |
86 | | } ent; |
87 | | }; |
88 | | |
89 | | /* Helper used by the event_io_map hashtable code; tries to return a good hash |
90 | | * of the fd in e->fd. */ |
91 | | static inline unsigned |
92 | | hashsocket(struct event_map_entry *e) |
93 | | { |
94 | | /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to |
95 | | * matter. Our hashtable implementation really likes low-order bits, |
96 | | * though, so let's do the rotate-and-add trick. */ |
97 | | unsigned h = (unsigned) e->fd; |
98 | | h += (h >> 2) | (h << 30); |
99 | | return h; |
100 | | } |
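
The rotate-and-add trick above is worth seeing in isolation: for a 32-bit unsigned value, (h >> 2) | (h << 30) is h rotated right by two bits, and adding the rotation back lets the higher bits of a SOCKET influence the low-order bits the hashtable indexes on. A minimal standalone sketch (the function name and sample values are illustrative, not from libevent):

    #include <stdio.h>

    /* Same mixing step as hashsocket(), assuming 32-bit unsigned. */
    static unsigned mix(unsigned h)
    {
        h += (h >> 2) | (h << 30);
        return h;
    }

    int main(void)
    {
        /* Win32 SOCKET values are often multiples of 4, so their low
         * two bits carry no information until mixed. */
        printf("%u %u %u\n", mix(4), mix(8), mix(12)); /* 5 10 15 */
        return 0;
    }
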
101 | | |
102 | | /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2 |
103 | | * have the same e->fd. */ |
104 | | static inline int |
105 | | eqsocket(struct event_map_entry *e1, struct event_map_entry *e2) |
106 | | { |
107 | | return e1->fd == e2->fd; |
108 | | } |
109 | | |
110 | | HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket) |
111 | | HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket, |
112 | | 0.5, mm_malloc, mm_realloc, mm_free) |
113 | | |
114 | | #define GET_IO_SLOT(x, map, slot, type) \ |
115 | | do { \ |
116 | | struct event_map_entry key_, *ent_; \ |
117 | | key_.fd = slot; \ |
118 | | ent_ = HT_FIND(event_io_map, map, &key_); \ |
119 | | (x) = ent_ ? &ent_->ent.type : NULL; \ |
120 | | } while (0) |
121 | | |
122 | | #define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \ |
123 | | do { \ |
124 | | struct event_map_entry key_, *ent_; \ |
125 | | key_.fd = slot; \ |
126 | | HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \ |
127 | | event_map_entry, &key_, ptr, \ |
128 | | { \ |
129 | | ent_ = *ptr; \ |
130 | | }, \ |
131 | | { \ |
132 | | ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \ |
133 | | if (EVUTIL_UNLIKELY(ent_ == NULL)) \ |
134 | | return (-1); \ |
135 | | ent_->fd = slot; \ |
136 | | (ctor)(&ent_->ent.type); \ |
137 | | HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \ |
138 | | }); \ |
139 | | (x) = &ent_->ent.type; \ |
140 | | } while (0) |
141 | | |
142 | | void evmap_io_initmap_(struct event_io_map *ctx) |
143 | | { |
144 | | HT_INIT(event_io_map, ctx); |
145 | | } |
146 | | |
147 | | void evmap_io_clear_(struct event_io_map *ctx) |
148 | | { |
149 | | struct event_map_entry **ent, **next, *this; |
150 | | for (ent = HT_START(event_io_map, ctx); ent; ent = next) { |
151 | | this = *ent; |
152 | | next = HT_NEXT_RMV(event_io_map, ctx, ent); |
153 | | mm_free(this); |
154 | | } |
155 | | HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */ |
156 | | } |
157 | | #endif |
158 | | |
159 | | /* Set the variable 'x' to the entry of type 'struct type *' in event_map |
160 | | 'map' corresponding to the fd or signal 'slot'. Set 'x' to NULL |
161 | | if there are no entries for 'slot'. Does no bounds-checking. */ |
162 | | #define GET_SIGNAL_SLOT(x, map, slot, type) \ |
163 | 21.2M | (x) = (struct type *)((map)->entries[slot]) |
164 | | /* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present, |
165 | | by allocating enough memory for a 'struct type', and initializing the new |
166 | | value by calling the function 'ctor' on it. Makes the function |
167 | | return -1 on allocation failure. |
168 | | */ |
169 | | #define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \ |
170 | 9.45M | do { \ |
171 | 9.45M | if ((map)->entries[slot] == NULL) { \ |
172 | 62.2k | (map)->entries[slot] = \ |
173 | 62.2k | mm_calloc(1,sizeof(struct type)+fdinfo_len); \ |
174 | 62.2k | if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \ |
175 | 62.2k | return (-1); \ |
176 | 62.2k | (ctor)((struct type *)(map)->entries[slot]); \ |
177 | 62.2k | } \ |
178 | 9.45M | (x) = (struct type *)((map)->entries[slot]); \ |
179 | 9.45M | } while (0) |
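
Note the `+ fdinfo_len` in the allocation above: each slot is over-allocated so the backend can keep private per-fd bytes immediately after the struct; evmap_io_get_fdinfo_() further down recovers them with the same pointer arithmetic. A minimal sketch of that layout trick, with a stand-in struct (names here are illustrative, not libevent's):

    #include <stdlib.h>
    #include <string.h>

    struct slot { int nread, nwrite; };  /* stand-in for struct evmap_io */

    int main(void)
    {
        size_t fdinfo_len = 4;  /* what evsel->fdinfo_len would supply */
        /* One calloc covers the slot plus the backend's trailing bytes. */
        struct slot *s = calloc(1, sizeof(struct slot) + fdinfo_len);
        if (s == NULL)
            return 1;
        /* The backend's private area starts right after the struct. */
        void *fdinfo = (char *)s + sizeof(struct slot);
        memset(fdinfo, 0, fdinfo_len);
        free(s);
        return 0;
    }
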
180 | | |
181 | | /* If we aren't using hashtables, then define the IO_SLOT macros and functions |
182 | | as thin aliases over the SIGNAL_SLOT versions. */ |
183 | | #ifndef EVMAP_USE_HT |
184 | 21.2M | #define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type) |
185 | | #define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \ |
186 | 9.45M | GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) |
187 | | #define FDINFO_OFFSET sizeof(struct evmap_io) |
188 | | void |
189 | | evmap_io_initmap_(struct event_io_map* ctx) |
190 | 11.0k | { |
191 | 11.0k | evmap_signal_initmap_(ctx); |
192 | 11.0k | } |
193 | | void |
194 | | evmap_io_clear_(struct event_io_map* ctx) |
195 | 11.0k | { |
196 | 11.0k | evmap_signal_clear_(ctx); |
197 | 11.0k | } |
198 | | #endif |
199 | | |
200 | | |
201 | | /** Expand 'map' with new entries of width 'msize' until it is big enough |
202 | | to store a value in 'slot'. |
203 | | */ |
204 | | static int |
205 | | evmap_make_space(struct event_signal_map *map, int slot, int msize) |
206 | 22.1k | { |
207 | 22.1k | if (map->nentries <= slot) { Branch (207:6): [True: 22.1k, False: 0]
|
208 | 22.1k | int nentries = map->nentries ? map->nentries : 32; Branch (208:18): [True: 11.0k, False: 11.0k]
|
209 | 22.1k | void **tmp; |
210 | | |
211 | 22.1k | if (slot > INT_MAX / 2) Branch (211:7): [True: 0, False: 22.1k]
|
212 | 0 | return (-1); |
213 | | |
214 | 33.2k | while (nentries <= slot) Branch (214:10): [True: 11.0k, False: 22.1k]
|
215 | 11.0k | nentries <<= 1; |
216 | | |
217 | 22.1k | if (nentries > INT_MAX / msize) Branch (217:7): [True: 0, False: 22.1k]
|
218 | 0 | return (-1); |
219 | | |
220 | 22.1k | tmp = (void **)mm_realloc(map->entries, nentries * msize); |
221 | 22.1k | if (tmp == NULL) Branch (221:7): [True: 0, False: 22.1k]
|
222 | 0 | return (-1); |
223 | | |
224 | 22.1k | memset(&tmp[map->nentries], 0, |
225 | 22.1k | (nentries - map->nentries) * msize); |
226 | | |
227 | 22.1k | map->nentries = nentries; |
228 | 22.1k | map->entries = tmp; |
229 | 22.1k | } |
230 | | |
231 | 22.1k | return (0); |
232 | 22.1k | } |
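
The sizing policy above starts at 32 entries and doubles until the requested slot fits, guarding against integer overflow of both the entry count and the byte count before reallocating. The doubling loop on its own, as a standalone sketch:

    #include <stdio.h>

    /* Same sizing rule as evmap_make_space(), minus the realloc. */
    static int grown_size(int nentries, int slot)
    {
        if (nentries == 0)
            nentries = 32;
        while (nentries <= slot)
            nentries <<= 1;
        return nentries;
    }

    int main(void)
    {
        printf("%d\n", grown_size(0, 5));   /* 32 */
        printf("%d\n", grown_size(0, 40));  /* 64: one doubling */
        printf("%d\n", grown_size(64, 64)); /* 128 */
        return 0;
    }
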
233 | | |
234 | | void |
235 | | evmap_signal_initmap_(struct event_signal_map *ctx) |
236 | 22.1k | { |
237 | 22.1k | ctx->nentries = 0; |
238 | 22.1k | ctx->entries = NULL; |
239 | 22.1k | } |
240 | | |
241 | | void |
242 | | evmap_signal_clear_(struct event_signal_map *ctx) |
243 | 22.1k | { |
244 | 22.1k | if (ctx->entries != NULL) { Branch (244:6): [True: 11.0k, False: 11.0k]
|
245 | 11.0k | int i; |
246 | 720k | for (i = 0; i < ctx->nentries; ++i) { Branch (246:15): [True: 709k, False: 11.0k]
|
247 | 709k | if (ctx->entries[i] != NULL) Branch (247:8): [True: 62.2k, False: 647k]
|
248 | 62.2k | mm_free(ctx->entries[i]); |
249 | 709k | } |
250 | 11.0k | mm_free(ctx->entries); |
251 | 11.0k | ctx->entries = NULL; |
252 | 11.0k | } |
253 | 22.1k | ctx->nentries = 0; |
254 | 22.1k | } |
255 | | |
256 | | |
257 | | /* code specific to file descriptors */ |
258 | | |
259 | | /** Constructor for struct evmap_io */ |
260 | | static void |
261 | | evmap_io_init(struct evmap_io *entry) |
262 | 62.2k | { |
263 | 62.2k | LIST_INIT(&entry->events); |
264 | 62.2k | entry->nread = 0; |
265 | 62.2k | entry->nwrite = 0; |
266 | 62.2k | entry->nclose = 0; |
267 | 62.2k | } |
268 | | |
269 | | |
270 | | /* return -1 on error, 0 on success if nothing changed in the event backend, |
271 | | * and 1 on success if something did. */ |
272 | | int |
273 | | evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev) |
274 | 9.45M | { |
275 | 9.45M | const struct eventop *evsel = base->evsel; |
276 | 9.45M | struct event_io_map *io = &base->io; |
277 | 9.45M | struct evmap_io *ctx = NULL; |
278 | 9.45M | int nread, nwrite, nclose, retval = 0; |
279 | 9.45M | short res = 0, old = 0; |
280 | 9.45M | struct event *old_ev; |
281 | | |
282 | 9.45M | EVUTIL_ASSERT(fd == ev->ev_fd); |
283 | | |
284 | 9.45M | if (fd < 0) Branch (284:6): [True: 0, False: 9.45M]
|
285 | 0 | return 0; |
286 | | |
287 | 9.45M | #ifndef EVMAP_USE_HT |
288 | 9.45M | if (fd >= io->nentries) { Branch (288:6): [True: 22.1k, False: 9.43M]
|
289 | 22.1k | if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1) Branch (289:7): [True: 0, False: 22.1k]
|
290 | 0 | return (-1); |
291 | 22.1k | } |
292 | 9.45M | #endif |
293 | 9.45M | GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init, |
294 | 9.45M | evsel->fdinfo_len); |
295 | | |
296 | 9.45M | nread = ctx->nread; |
297 | 9.45M | nwrite = ctx->nwrite; |
298 | 9.45M | nclose = ctx->nclose; |
299 | | |
300 | 9.45M | if (nread) Branch (300:6): [True: 2.35M, False: 7.09M]
|
301 | 2.35M | old |= EV_READ; |
302 | 9.45M | if (nwrite) Branch (302:6): [True: 2.35M, False: 7.09M]
|
303 | 2.35M | old |= EV_WRITE; |
304 | 9.45M | if (nclose) Branch (304:6): [True: 0, False: 9.45M]
|
305 | 0 | old |= EV_CLOSED; |
306 | | |
307 | 9.45M | if (ev->ev_events & EV_READ) { Branch (307:6): [True: 4.73M, False: 4.71M]
|
308 | 4.73M | if (++nread == 1) Branch (308:7): [True: 4.73M, False: 0]
|
309 | 4.73M | res |= EV_READ; |
310 | 4.73M | } |
311 | 9.45M | if (ev->ev_events & EV_WRITE) { Branch (311:6): [True: 4.71M, False: 4.73M]
|
312 | 4.71M | if (++nwrite == 1) Branch (312:7): [True: 4.71M, False: 0]
|
313 | 4.71M | res |= EV_WRITE; |
314 | 4.71M | } |
315 | 9.45M | if (ev->ev_events & EV_CLOSED) { Branch (315:6): [True: 0, False: 9.45M]
|
316 | 0 | if (++nclose == 1) Branch (316:7): [True: 0, False: 0]
|
317 | 0 | res |= EV_CLOSED; |
318 | 0 | } |
319 | 9.45M | if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) { |
320 | 0 | event_warnx("Too many events reading or writing on fd %d", |
321 | 0 | (int)fd); |
322 | 0 | return -1; |
323 | 0 | } |
324 | 9.45M | if (EVENT_DEBUG_MODE_IS_ON() && |
325 | 9.45M | (old_ev = LIST_FIRST(&ctx->events)) && Branch (325:6): [True: 0, False: 0]
|
326 | 9.45M | (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) { Branch (326:6): [True: 0, False: 0]
|
327 | 0 | event_warnx("Tried to mix edge-triggered and non-edge-triggered" |
328 | 0 | " events on fd %d", (int)fd); |
329 | 0 | return -1; |
330 | 0 | } |
331 | | |
332 | 9.45M | if (res) { Branch (332:6): [True: 9.45M, False: 0]
|
333 | 9.45M | void *extra = ((char*)ctx) + sizeof(struct evmap_io); |
334 | | /* XXX(niels): we cannot mix edge-triggered and |
335 | | * level-triggered, we should probably assert on |
336 | | * this. */ |
337 | 9.45M | if (evsel->add(base, ev->ev_fd, Branch (337:7): [True: 0, False: 9.45M]
|
338 | 9.45M | old, (ev->ev_events & EV_ET) | res, extra) == -1) |
339 | 0 | return (-1); |
340 | 9.45M | retval = 1; |
341 | 9.45M | } |
342 | | |
343 | 9.45M | ctx->nread = (ev_uint16_t) nread; |
344 | 9.45M | ctx->nwrite = (ev_uint16_t) nwrite; |
345 | 9.45M | ctx->nclose = (ev_uint16_t) nclose; |
346 | 9.45M | LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next); |
347 | | |
348 | 9.45M | return (retval); |
349 | 9.45M | } |
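
The nread/nwrite/nclose counters above make backend interest reference-counted: only a 0-to-1 transition sets a bit in res, so only the first EV_READ event on an fd reaches evsel->add, and (in evmap_io_del_ below) only removing the last one reaches evsel->del. The transition logic in isolation:

    #include <stdio.h>

    int main(void)
    {
        int nread = 0;
        /* First EV_READ event on the fd: 0 -> 1, notify the backend. */
        printf("first add -> backend: %d\n", ++nread == 1);  /* 1 */
        /* A second event on the same fd changes nothing kernel-side. */
        printf("second add -> backend: %d\n", ++nread == 1); /* 0 */
        /* Removing one of the two: a reader remains, stay quiet. */
        printf("first del -> backend: %d\n", --nread == 0);  /* 0 */
        /* Removing the last one: 1 -> 0, drop READ in the backend. */
        printf("last del -> backend: %d\n", --nread == 0);   /* 1 */
        return 0;
    }
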
350 | | |
351 | | /* return -1 on error, 0 on success if nothing changed in the event backend, |
352 | | * and 1 on success if something did. */ |
353 | | int |
354 | | evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev) |
355 | 9.45M | { |
356 | 9.45M | const struct eventop *evsel = base->evsel; |
357 | 9.45M | struct event_io_map *io = &base->io; |
358 | 9.45M | struct evmap_io *ctx; |
359 | 9.45M | int nread, nwrite, nclose, retval = 0; |
360 | 9.45M | short res = 0, old = 0; |
361 | | |
362 | 9.45M | if (fd < 0) Branch (362:6): [True: 0, False: 9.45M]
|
363 | 0 | return 0; |
364 | | |
365 | 9.45M | EVUTIL_ASSERT(fd == ev->ev_fd); |
366 | | |
367 | 9.45M | #ifndef EVMAP_USE_HT |
368 | 9.45M | if (fd >= io->nentries) Branch (368:6): [True: 0, False: 9.45M]
|
369 | 0 | return (-1); |
370 | 9.45M | #endif |
371 | | |
372 | 9.45M | GET_IO_SLOT(ctx, io, fd, evmap_io); |
373 | | |
374 | 9.45M | nread = ctx->nread; |
375 | 9.45M | nwrite = ctx->nwrite; |
376 | 9.45M | nclose = ctx->nclose; |
377 | | |
378 | 9.45M | if (nread) Branch (378:6): [True: 9.45M, False: 0]
|
379 | 9.45M | old |= EV_READ; |
380 | 9.45M | if (nwrite) Branch (380:6): [True: 4.71M, False: 4.73M]
|
381 | 4.71M | old |= EV_WRITE; |
382 | 9.45M | if (nclose) Branch (382:6): [True: 0, False: 9.45M]
|
383 | 0 | old |= EV_CLOSED; |
384 | | |
385 | 9.45M | if (ev->ev_events & EV_READ) { Branch (385:6): [True: 4.73M, False: 4.71M]
|
386 | 4.73M | if (--nread == 0) Branch (386:7): [True: 4.73M, False: 0]
|
387 | 4.73M | res |= EV_READ; |
388 | 4.73M | EVUTIL_ASSERT(nread >= 0); |
389 | 4.73M | } |
390 | 9.45M | if (ev->ev_events & EV_WRITE) { Branch (390:6): [True: 4.71M, False: 4.73M]
|
391 | 4.71M | if (--nwrite == 0) Branch (391:7): [True: 4.71M, False: 0]
|
392 | 4.71M | res |= EV_WRITE; |
393 | 4.71M | EVUTIL_ASSERT(nwrite >= 0); |
394 | 4.71M | } |
395 | 9.45M | if (ev->ev_events & EV_CLOSED) { Branch (395:6): [True: 0, False: 9.45M]
|
396 | 0 | if (--nclose == 0) Branch (396:7): [True: 0, False: 0]
|
397 | 0 | res |= EV_CLOSED; |
398 | 0 | EVUTIL_ASSERT(nclose >= 0); |
399 | 0 | } |
400 | | |
401 | 9.45M | if (res) { Branch (401:6): [True: 9.45M, False: 0]
|
402 | 9.45M | void *extra = ((char*)ctx) + sizeof(struct evmap_io); |
403 | 9.45M | if (evsel->del(base, ev->ev_fd, Branch (403:7): [True: 0, False: 9.45M]
|
404 | 9.45M | old, (ev->ev_events & EV_ET) | res, extra) == -1) { |
405 | 0 | retval = -1; |
406 | 9.45M | } else { |
407 | 9.45M | retval = 1; |
408 | 9.45M | } |
409 | 9.45M | } |
410 | | |
411 | 9.45M | ctx->nread = nread; |
412 | 9.45M | ctx->nwrite = nwrite; |
413 | 9.45M | ctx->nclose = nclose; |
414 | 9.45M | LIST_REMOVE(ev, ev_io_next); |
415 | | |
416 | 9.45M | return (retval); |
417 | 9.45M | } |
418 | | |
419 | | void |
420 | | evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events) |
421 | 11.7M | { |
422 | 11.7M | struct event_io_map *io = &base->io; |
423 | 11.7M | struct evmap_io *ctx; |
424 | 11.7M | struct event *ev; |
425 | | |
426 | 11.7M | #ifndef EVMAP_USE_HT |
427 | 11.7M | if (fd < 0 || fd >= io->nentries) Branch (427:6): [True: 0, False: 11.7M]
Branch (427:16): [True: 0, False: 11.7M]
|
428 | 0 | return; |
429 | 11.7M | #endif |
430 | 11.7M | GET_IO_SLOT(ctx, io, fd, evmap_io); |
431 | | |
432 | 11.7M | if (NULL == ctx) Branch (432:6): [True: 0, False: 11.7M]
|
433 | 0 | return; |
434 | 14.1M | LIST_FOREACH(ev, &ctx->events, ev_io_next) { |
435 | 14.1M | if (ev->ev_events & (events & ~EV_ET)) Branch (435:7): [True: 11.7M, False: 2.35M]
|
436 | 11.7M | event_active_nolock_(ev, ev->ev_events & events, 1); |
437 | 14.1M | } |
438 | 11.7M | } |
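
The mask events & ~EV_ET above keeps the edge-triggered flag from matching on its own: whether an event activates should depend only on the I/O bits it asked for. A small demonstration using the flag values from event2/event.h:

    #include <stdio.h>

    /* Flag values as defined in <event2/event.h>. */
    #define EV_READ  0x02
    #define EV_WRITE 0x04
    #define EV_ET    0x20

    int main(void)
    {
        short reported = EV_READ | EV_ET;  /* backend: readable, edge-triggered */
        short wants    = EV_WRITE | EV_ET; /* event: wants writability, edge */

        /* Unmasked, the shared EV_ET bit alone would activate the event: */
        printf("unmasked: %#x\n", wants & reported);            /* 0x20 */
        /* Masked as in evmap_io_active_, there is correctly no match: */
        printf("masked:   %#x\n", wants & (reported & ~EV_ET)); /* 0 */
        return 0;
    }
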
439 | | |
440 | | /* code specific to signals */ |
441 | | |
442 | | static void |
443 | | evmap_signal_init(struct evmap_signal *entry) |
444 | 0 | { |
445 | 0 | LIST_INIT(&entry->events); |
446 | 0 | } |
447 | | |
448 | | |
449 | | int |
450 | | evmap_signal_add_(struct event_base *base, int sig, struct event *ev) |
451 | 0 | { |
452 | 0 | const struct eventop *evsel = base->evsigsel; |
453 | 0 | struct event_signal_map *map = &base->sigmap; |
454 | 0 | struct evmap_signal *ctx = NULL; |
455 | |
456 | 0 | if (sig < 0 || sig >= NSIG) Branch (456:6): [True: 0, False: 0]
Branch (456:17): [True: 0, False: 0]
|
457 | 0 | return (-1); |
458 | | |
459 | 0 | if (sig >= map->nentries) { Branch (459:6): [True: 0, False: 0]
|
460 | 0 | if (evmap_make_space( Branch (460:7): [True: 0, False: 0]
|
461 | 0 | map, sig, sizeof(struct evmap_signal *)) == -1) |
462 | 0 | return (-1); |
463 | 0 | } |
464 | 0 | GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init, |
465 | 0 | base->evsigsel->fdinfo_len); |
466 | | |
467 | 0 | if (LIST_EMPTY(&ctx->events)) { |
468 | 0 | if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL) Branch (468:7): [True: 0, False: 0]
|
469 | 0 | == -1) |
470 | 0 | return (-1); |
471 | 0 | } |
472 | | |
473 | 0 | LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next); |
474 | |
475 | 0 | return (1); |
476 | 0 | } |
477 | | |
478 | | int |
479 | | evmap_signal_del_(struct event_base *base, int sig, struct event *ev) |
480 | 0 | { |
481 | 0 | const struct eventop *evsel = base->evsigsel; |
482 | 0 | struct event_signal_map *map = &base->sigmap; |
483 | 0 | struct evmap_signal *ctx; |
484 | |
485 | 0 | if (sig < 0 || sig >= map->nentries) Branch (485:6): [True: 0, False: 0]
Branch (485:17): [True: 0, False: 0]
|
486 | 0 | return (-1); |
487 | | |
488 | 0 | GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); |
489 | |
490 | 0 | LIST_REMOVE(ev, ev_signal_next); |
491 | |
492 | 0 | if (LIST_FIRST(&ctx->events) == NULL) { Branch (492:6): [True: 0, False: 0]
|
493 | 0 | if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1) Branch (493:7): [True: 0, False: 0]
|
494 | 0 | return (-1); |
495 | 0 | } |
496 | | |
497 | 0 | return (1); |
498 | 0 | } |
499 | | |
500 | | void |
501 | | evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls) |
502 | 0 | { |
503 | 0 | struct event_signal_map *map = &base->sigmap; |
504 | 0 | struct evmap_signal *ctx; |
505 | 0 | struct event *ev; |
506 | |
507 | 0 | if (sig < 0 || sig >= map->nentries) Branch (507:6): [True: 0, False: 0]
Branch (507:17): [True: 0, False: 0]
|
508 | 0 | return; |
509 | 0 | GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); |
510 | |
511 | 0 | if (!ctx) Branch (511:6): [True: 0, False: 0]
|
512 | 0 | return; |
513 | 0 | LIST_FOREACH(ev, &ctx->events, ev_signal_next) |
514 | 0 | event_active_nolock_(ev, EV_SIGNAL, ncalls); |
515 | 0 | } |
516 | | |
517 | | void * |
518 | | evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd) |
519 | 0 | { |
520 | 0 | struct evmap_io *ctx; |
521 | 0 | GET_IO_SLOT(ctx, map, fd, evmap_io); |
522 | 0 | if (ctx) Branch (522:6): [True: 0, False: 0]
|
523 | 0 | return ((char*)ctx) + sizeof(struct evmap_io); |
524 | 0 | else |
525 | 0 | return NULL; |
526 | 0 | } |
527 | | |
528 | | /* Callback type for evmap_io_foreach_fd */ |
529 | | typedef int (*evmap_io_foreach_fd_cb)( |
530 | | struct event_base *, evutil_socket_t, struct evmap_io *, void *); |
531 | | |
532 | | /* Multipurpose helper function: Iterate over every file descriptor in the |
533 | | * event_base for which we could have EV_READ or EV_WRITE events. For each |
534 | | * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided |
535 | | * function, base is the event_base, fd is the file descriptor, evmap_io |
536 | | * is an evmap_io structure containing a list of events pending on the |
537 | | * file descriptor, and arg is the user-supplied argument. |
538 | | * |
539 | | * If fn returns 0, continue on to the next fd. Otherwise, return the same |
540 | | * value that fn returned. |
541 | | * |
542 | | * Note that there is no guarantee that the file descriptors will be processed |
543 | | * in any particular order. |
544 | | */ |
545 | | static int |
546 | | evmap_io_foreach_fd(struct event_base *base, |
547 | | evmap_io_foreach_fd_cb fn, |
548 | | void *arg) |
549 | 11.0k | { |
550 | 11.0k | evutil_socket_t fd; |
551 | 11.0k | struct event_io_map *iomap = &base->io; |
552 | 11.0k | int r = 0; |
553 | | #ifdef EVMAP_USE_HT |
554 | | struct event_map_entry **mapent; |
555 | | HT_FOREACH(mapent, event_io_map, iomap) { |
556 | | struct evmap_io *ctx = &(*mapent)->ent.evmap_io; |
557 | | fd = (*mapent)->fd; |
558 | | #else |
559 | 720k | for (fd = 0; fd < iomap->nentries; ++fd) { Branch (559:15): [True: 709k, False: 11.0k]
|
560 | 709k | struct evmap_io *ctx = iomap->entries[fd]; |
561 | 709k | if (!ctx) Branch (561:7): [True: 647k, False: 62.2k]
|
562 | 647k | continue; |
563 | 62.2k | #endif |
564 | 62.2k | if ((r = fn(base, fd, ctx, arg))) Branch (564:7): [True: 0, False: 62.2k]
|
565 | 0 | break; |
566 | 62.2k | } |
567 | 11.0k | return r; |
568 | 11.0k | } |
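
Both foreach helpers here share the early-exit contract the comment above describes: the first nonzero value returned by fn stops the walk and becomes the iterator's own return value. The contract in miniature (toy types, not the libevent structures):

    #include <stdio.h>

    typedef int (*visit_cb)(int item, void *arg);

    /* Visit items in order; propagate the first nonzero callback result. */
    static int for_each(const int *items, int n, visit_cb fn, void *arg)
    {
        int r = 0;
        for (int i = 0; i < n; ++i)
            if ((r = fn(items[i], arg)))
                break;
        return r;
    }

    static int first_over(int item, void *arg)
    {
        return item > *(int *)arg ? item : 0;
    }

    int main(void)
    {
        int items[] = { 3, 7, 2 }, limit = 5;
        printf("%d\n", for_each(items, 3, first_over, &limit)); /* 7 */
        return 0;
    }
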
569 | | |
570 | | /* Callback type for evmap_signal_foreach_signal */ |
571 | | typedef int (*evmap_signal_foreach_signal_cb)( |
572 | | struct event_base *, int, struct evmap_signal *, void *); |
573 | | |
574 | | /* Multipurpose helper function: Iterate over every signal number in the |
575 | | * event_base for which we could have signal events. For each such signal, |
576 | | * call fn(base, signum, evmap_signal, arg), where fn is the user-provided |
577 | | * function, base is the event_base, signum is the signal number, evmap_signal |
578 | | * is an evmap_signal structure containing a list of events pending on the |
579 | | * signal, and arg is the user-supplied argument. |
580 | | * |
581 | | * If fn returns 0, continue on to the next signal. Otherwise, return the same |
582 | | * value that fn returned. |
583 | | */ |
584 | | static int |
585 | | evmap_signal_foreach_signal(struct event_base *base, |
586 | | evmap_signal_foreach_signal_cb fn, |
587 | | void *arg) |
588 | 11.0k | { |
589 | 11.0k | struct event_signal_map *sigmap = &base->sigmap; |
590 | 11.0k | int r = 0; |
591 | 11.0k | int signum; |
592 | | |
593 | 11.0k | for (signum = 0; signum < sigmap->nentries; ++signum) { Branch (593:19): [True: 0, False: 11.0k]
|
594 | 0 | struct evmap_signal *ctx = sigmap->entries[signum]; |
595 | 0 | if (!ctx) Branch (595:7): [True: 0, False: 0]
|
596 | 0 | continue; |
597 | 0 | if ((r = fn(base, signum, ctx, arg))) Branch (597:7): [True: 0, False: 0]
|
598 | 0 | break; |
599 | 0 | } |
600 | 11.0k | return r; |
601 | 11.0k | } |
602 | | |
603 | | /* Helper for evmap_reinit_: tell the backend to add every fd for which we have |
604 | | * pending events, with the appropriate combination of EV_READ, EV_WRITE, |
605 | | * EV_CLOSED, and EV_ET. */ |
606 | | static int |
607 | | evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd, |
608 | | struct evmap_io *ctx, void *arg) |
609 | 0 | { |
610 | 0 | const struct eventop *evsel = base->evsel; |
611 | 0 | void *extra; |
612 | 0 | int *result = arg; |
613 | 0 | short events = 0; |
614 | 0 | struct event *ev; |
615 | 0 | EVUTIL_ASSERT(ctx); |
616 | | |
617 | 0 | extra = ((char*)ctx) + sizeof(struct evmap_io); |
618 | 0 | if (ctx->nread) Branch (618:6): [True: 0, False: 0]
|
619 | 0 | events |= EV_READ; |
620 | 0 | if (ctx->nwrite) Branch (620:6): [True: 0, False: 0]
|
621 | 0 | events |= EV_WRITE; |
622 | 0 | if (ctx->nclose) Branch (622:6): [True: 0, False: 0]
|
623 | 0 | events |= EV_CLOSED; |
624 | 0 | if (evsel->fdinfo_len) Branch (624:6): [True: 0, False: 0]
|
625 | 0 | memset(extra, 0, evsel->fdinfo_len); |
626 | 0 | if (events && Branch (626:6): [True: 0, False: 0]
|
627 | 0 | (ev = LIST_FIRST(&ctx->events)) && Branch (627:6): [True: 0, False: 0]
|
628 | 0 | (ev->ev_events & EV_ET)) Branch (628:6): [True: 0, False: 0]
|
629 | 0 | events |= EV_ET; |
630 | 0 | if (evsel->add(base, fd, 0, events, extra) == -1) Branch (630:6): [True: 0, False: 0]
|
631 | 0 | *result = -1; |
632 | |
633 | 0 | return 0; |
634 | 0 | } |
635 | | |
636 | | /* Helper for evmap_reinit_: tell the backend to add every signal for which we |
637 | | * have pending events. */ |
638 | | static int |
639 | | evmap_signal_reinit_iter_fn(struct event_base *base, |
640 | | int signum, struct evmap_signal *ctx, void *arg) |
641 | 0 | { |
642 | 0 | const struct eventop *evsel = base->evsigsel; |
643 | 0 | int *result = arg; |
644 | |
645 | 0 | if (!LIST_EMPTY(&ctx->events)) { Branch (645:6): [True: 0, False: 0]
|
646 | 0 | if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1) Branch (646:7): [True: 0, False: 0]
|
647 | 0 | *result = -1; |
648 | 0 | } |
649 | 0 | return 0; |
650 | 0 | } |
651 | | |
652 | | int |
653 | | evmap_reinit_(struct event_base *base) |
654 | 0 | { |
655 | 0 | int result = 0; |
656 | |
657 | 0 | evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result); |
658 | 0 | if (result < 0) Branch (658:6): [True: 0, False: 0]
|
659 | 0 | return -1; |
660 | 0 | evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result); |
661 | 0 | if (result < 0) Branch (661:6): [True: 0, False: 0]
|
662 | 0 | return -1; |
663 | 0 | return 0; |
664 | 0 | } |
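
evmap_reinit_ exists so that the public event_reinit() can rebuild backend state (an epoll fd, for instance) from the evmap after fork(), using the two iterators above. A minimal usage sketch of that public API (error handling trimmed):

    #include <event2/event.h>
    #include <unistd.h>

    static int dispatch_in_child(struct event_base *base)
    {
        pid_t pid = fork();
        if (pid < 0)
            return -1;
        if (pid == 0) {
            /* Child: kernel-side backend objects are stale or shared;
             * event_reinit() re-adds every fd and signal the evmap
             * still tracks. */
            if (event_reinit(base) == -1)
                return -1;
            return event_base_dispatch(base);
        }
        return 0; /* parent */
    }

    int main(void)
    {
        struct event_base *base = event_base_new();
        int r = base ? dispatch_in_child(base) : -1;
        if (base)
            event_base_free(base);
        return r < 0;
    }
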
665 | | |
666 | | /* Helper for evmap_delete_all_: delete every event in an event_dlist. */ |
667 | | static int |
668 | | delete_all_in_dlist(struct event_dlist *dlist) |
669 | 62.2k | { |
670 | 62.2k | struct event *ev; |
671 | 62.2k | while ((ev = LIST_FIRST(dlist))) Branch (671:9): [True: 0, False: 62.2k]
|
672 | 0 | event_del(ev); |
673 | 62.2k | return 0; |
674 | 62.2k | } |
675 | | |
676 | | /* Helper for evmap_delete_all_: delete every event pending on an fd. */ |
677 | | static int |
678 | | evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd, |
679 | | struct evmap_io *io_info, void *arg) |
680 | 62.2k | { |
681 | 62.2k | return delete_all_in_dlist(&io_info->events); |
682 | 62.2k | } |
683 | | |
684 | | /* Helper for evmap_delete_all_: delete every event pending on a signal. */ |
685 | | static int |
686 | | evmap_signal_delete_all_iter_fn(struct event_base *base, int signum, |
687 | | struct evmap_signal *sig_info, void *arg) |
688 | 0 | { |
689 | 0 | return delete_all_in_dlist(&sig_info->events); |
690 | 0 | } |
691 | | |
692 | | void |
693 | | evmap_delete_all_(struct event_base *base) |
694 | 11.0k | { |
695 | 11.0k | evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL); |
696 | 11.0k | evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL); |
697 | 11.0k | } |
698 | | |
699 | | /** Per-fd structure for use with changelists. It keeps track, for each fd or |
700 | | * signal using the changelist, of where its entry in the changelist is. |
701 | | */ |
702 | | struct event_changelist_fdinfo { |
703 | | int idxplus1; /* this is the index +1, so that memset(0) will make it |
704 | | * a no-such-element */ |
705 | | }; |
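
The idxplus1 convention means a zero-filled fdinfo, which is exactly what the calloc in the slot constructors above produces, already reads as "no changelist entry"; no separate initialization pass is needed. The trick in isolation:

    #include <stdio.h>
    #include <string.h>

    struct fdinfo { int idxplus1; }; /* 0 == no changelist entry */

    int main(void)
    {
        struct fdinfo f;
        memset(&f, 0, sizeof(f));  /* a freshly calloc'd slot trailer */
        printf("has entry: %s\n", f.idxplus1 ? "yes" : "no"); /* no */

        f.idxplus1 = 3 + 1;        /* now refers to changes[3] */
        printf("index: %d\n", f.idxplus1 - 1);                /* 3 */
        return 0;
    }
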
706 | | |
707 | | void |
708 | | event_changelist_init_(struct event_changelist *changelist) |
709 | 22.1k | { |
710 | 22.1k | changelist->changes = NULL; |
711 | 22.1k | changelist->changes_size = 0; |
712 | 22.1k | changelist->n_changes = 0; |
713 | 22.1k | } |
714 | | |
715 | | /** Helper: return the changelist_fdinfo corresponding to a given change. */ |
716 | | static inline struct event_changelist_fdinfo * |
717 | | event_change_get_fdinfo(struct event_base *base, |
718 | | const struct event_change *change) |
719 | 0 | { |
720 | 0 | char *ptr; |
721 | 0 | if (change->read_change & EV_CHANGE_SIGNAL) { Branch (721:6): [True: 0, False: 0]
|
722 | 0 | struct evmap_signal *ctx; |
723 | 0 | GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal); |
724 | 0 | ptr = ((char*)ctx) + sizeof(struct evmap_signal); |
725 | 0 | } else { |
726 | 0 | struct evmap_io *ctx; |
727 | 0 | GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io); |
728 | 0 | ptr = ((char*)ctx) + sizeof(struct evmap_io); |
729 | 0 | } |
730 | 0 | return (void*)ptr; |
731 | 0 | } |
732 | | |
733 | | /** Callback helper for event_changelist_assert_ok */ |
734 | | static int |
735 | | event_changelist_assert_ok_foreach_iter_fn( |
736 | | struct event_base *base, |
737 | | evutil_socket_t fd, struct evmap_io *io, void *arg) |
738 | 0 | { |
739 | 0 | struct event_changelist *changelist = &base->changelist; |
740 | 0 | struct event_changelist_fdinfo *f; |
741 | 0 | f = (void*) |
742 | 0 | ( ((char*)io) + sizeof(struct evmap_io) ); |
743 | 0 | if (f->idxplus1) { Branch (743:6): [True: 0, False: 0]
|
744 | 0 | struct event_change *c = &changelist->changes[f->idxplus1 - 1]; |
745 | 0 | EVUTIL_ASSERT(c->fd == fd); |
746 | 0 | } |
747 | 0 | return 0; |
748 | 0 | } |
749 | | |
750 | | /** Make sure that the changelist is consistent with the evmap structures. */ |
751 | | static void |
752 | | event_changelist_assert_ok(struct event_base *base) |
753 | 0 | { |
754 | 0 | int i; |
755 | 0 | struct event_changelist *changelist = &base->changelist; |
756 | |
757 | 0 | EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes); |
758 | 0 | for (i = 0; i < changelist->n_changes; ++i) { Branch (758:14): [True: 0, False: 0]
|
759 | 0 | struct event_change *c = &changelist->changes[i]; |
760 | 0 | struct event_changelist_fdinfo *f; |
761 | 0 | EVUTIL_ASSERT(c->fd >= 0); |
762 | 0 | f = event_change_get_fdinfo(base, c); |
763 | 0 | EVUTIL_ASSERT(f); |
764 | 0 | EVUTIL_ASSERT(f->idxplus1 == i + 1); |
765 | 0 | } |
766 | | |
767 | 0 | evmap_io_foreach_fd(base, |
768 | 0 | event_changelist_assert_ok_foreach_iter_fn, |
769 | 0 | NULL); |
770 | 0 | } |
771 | | |
772 | | #ifdef DEBUG_CHANGELIST |
773 | | #define event_changelist_check(base) event_changelist_assert_ok((base)) |
774 | | #else |
775 | 23.5M | #define event_changelist_check(base) ((void)0) |
776 | | #endif |
777 | | |
778 | | void |
779 | | event_changelist_remove_all_(struct event_changelist *changelist, |
780 | | struct event_base *base) |
781 | 11.7M | { |
782 | 11.7M | int i; |
783 | | |
784 | 11.7M | event_changelist_check(base); |
785 | | |
786 | 11.7M | for (i = 0; i < changelist->n_changes; ++i) { Branch (786:14): [True: 0, False: 11.7M]
|
787 | 0 | struct event_change *ch = &changelist->changes[i]; |
788 | 0 | struct event_changelist_fdinfo *fdinfo = |
789 | 0 | event_change_get_fdinfo(base, ch); |
790 | 0 | EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1); |
791 | 0 | fdinfo->idxplus1 = 0; |
792 | 0 | } |
793 | | |
794 | 11.7M | changelist->n_changes = 0; |
795 | | |
796 | 11.7M | event_changelist_check(base); |
797 | 11.7M | } |
798 | | |
799 | | void |
800 | | event_changelist_freemem_(struct event_changelist *changelist) |
801 | 11.0k | { |
802 | 11.0k | if (changelist->changes) Branch (802:6): [True: 0, False: 11.0k]
|
803 | 0 | mm_free(changelist->changes); |
804 | 11.0k | event_changelist_init_(changelist); /* zero it all out. */ |
805 | 11.0k | } |
806 | | |
807 | | /** Increase the size of 'changelist' to hold more changes. */ |
808 | | static int |
809 | | event_changelist_grow(struct event_changelist *changelist) |
810 | 0 | { |
811 | 0 | int new_size; |
812 | 0 | struct event_change *new_changes; |
813 | 0 | if (changelist->changes_size < 64) Branch (813:6): [True: 0, False: 0]
|
814 | 0 | new_size = 64; |
815 | 0 | else |
816 | 0 | new_size = changelist->changes_size * 2; |
817 | |
818 | 0 | new_changes = mm_realloc(changelist->changes, |
819 | 0 | new_size * sizeof(struct event_change)); |
820 | |
821 | 0 | if (EVUTIL_UNLIKELY(new_changes == NULL)) |
822 | 0 | return (-1); |
823 | | |
824 | 0 | changelist->changes = new_changes; |
825 | 0 | changelist->changes_size = new_size; |
826 | |
827 | 0 | return (0); |
828 | 0 | } |
829 | | |
830 | | /** Return a pointer to the changelist entry for the file descriptor or signal |
831 | | * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its |
832 | | * old_events field to old_events. |
833 | | */ |
834 | | static struct event_change * |
835 | | event_changelist_get_or_construct(struct event_changelist *changelist, |
836 | | evutil_socket_t fd, |
837 | | short old_events, |
838 | | struct event_changelist_fdinfo *fdinfo) |
839 | 0 | { |
840 | 0 | struct event_change *change; |
841 | |
842 | 0 | if (fdinfo->idxplus1 == 0) { Branch (842:6): [True: 0, False: 0]
|
843 | 0 | int idx; |
844 | 0 | EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size); |
845 | | |
846 | 0 | if (changelist->n_changes == changelist->changes_size) { Branch (846:7): [True: 0, False: 0]
|
847 | 0 | if (event_changelist_grow(changelist) < 0) Branch (847:8): [True: 0, False: 0]
|
848 | 0 | return NULL; |
849 | 0 | } |
850 | | |
851 | 0 | idx = changelist->n_changes++; |
852 | 0 | change = &changelist->changes[idx]; |
853 | 0 | fdinfo->idxplus1 = idx + 1; |
854 | |
855 | 0 | memset(change, 0, sizeof(struct event_change)); |
856 | 0 | change->fd = fd; |
857 | 0 | change->old_events = old_events; |
858 | 0 | } else { |
859 | 0 | change = &changelist->changes[fdinfo->idxplus1 - 1]; |
860 | 0 | EVUTIL_ASSERT(change->fd == fd); |
861 | 0 | } |
862 | 0 | return change; |
863 | 0 | } |
864 | | |
865 | | int |
866 | | event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events, |
867 | | void *p) |
868 | 0 | { |
869 | 0 | struct event_changelist *changelist = &base->changelist; |
870 | 0 | struct event_changelist_fdinfo *fdinfo = p; |
871 | 0 | struct event_change *change; |
872 | 0 | ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL)); |
873 | |
874 | 0 | event_changelist_check(base); |
875 | |
876 | 0 | change = event_changelist_get_or_construct(changelist, fd, old, fdinfo); |
877 | 0 | if (!change) Branch (877:6): [True: 0, False: 0]
|
878 | 0 | return -1; |
879 | | |
880 | | /* An add replaces any previous delete, but doesn't result in a no-op, |
881 | | * since the delete might fail (because the fd had been closed since |
882 | | * the last add, for instance). */ |
883 | | |
884 | 0 | if (events & (EV_READ|EV_SIGNAL)) Branch (884:6): [True: 0, False: 0]
|
885 | 0 | change->read_change = evchange; |
886 | 0 | if (events & EV_WRITE) Branch (886:6): [True: 0, False: 0]
|
887 | 0 | change->write_change = evchange; |
888 | 0 | if (events & EV_CLOSED) Branch (888:6): [True: 0, False: 0]
|
889 | 0 | change->close_change = evchange; |
890 | |
891 | 0 | event_changelist_check(base); |
892 | 0 | return (0); |
893 | 0 | } |
894 | | |
895 | | int |
896 | | event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events, |
897 | | void *p) |
898 | 0 | { |
899 | 0 | struct event_changelist *changelist = &base->changelist; |
900 | 0 | struct event_changelist_fdinfo *fdinfo = p; |
901 | 0 | struct event_change *change; |
902 | 0 | ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET); |
903 | |
904 | 0 | event_changelist_check(base); |
905 | 0 | change = event_changelist_get_or_construct(changelist, fd, old, fdinfo); |
906 | 0 | event_changelist_check(base); |
907 | 0 | if (!change) Branch (907:6): [True: 0, False: 0]
|
908 | 0 | return -1; |
909 | | |
910 | | /* A delete on an event set that doesn't contain the event to be |
911 | | deleted produces a no-op. This effectively removes any previous |
912 | | uncommitted add, rather than replacing it: on those platforms where |
913 | | "add, delete, dispatch" is not the same as "no-op, dispatch", we |
914 | | want the no-op behavior. |
915 | | |
916 | | If we have a no-op item, we could remove it from the list |
917 | | entirely, but really there's not much point: skipping the no-op |
918 | | change when we do the dispatch later is far cheaper than rejuggling |
919 | | the array now. |
920 | | |
921 | | As this stands, it also lets through deletions of events that are |
922 | | not currently set. |
923 | | */ |
924 | | |
925 | 0 | if (events & (EV_READ|EV_SIGNAL)) { Branch (925:6): [True: 0, False: 0]
|
926 | 0 | if (!(change->old_events & (EV_READ | EV_SIGNAL))) Branch (926:7): [True: 0, False: 0]
|
927 | 0 | change->read_change = 0; |
928 | 0 | else |
929 | 0 | change->read_change = del; |
930 | 0 | } |
931 | 0 | if (events & EV_WRITE) { Branch (931:6): [True: 0, False: 0]
|
932 | 0 | if (!(change->old_events & EV_WRITE)) Branch (932:7): [True: 0, False: 0]
|
933 | 0 | change->write_change = 0; |
934 | 0 | else |
935 | 0 | change->write_change = del; |
936 | 0 | } |
937 | 0 | if (events & EV_CLOSED) { Branch (937:6): [True: 0, False: 0]
|
938 | 0 | if (!(change->old_events & EV_CLOSED)) Branch (938:7): [True: 0, False: 0]
|
939 | 0 | change->close_change = 0; |
940 | 0 | else |
941 | 0 | change->close_change = del; |
942 | 0 | } |
943 | |
944 | 0 | event_changelist_check(base); |
945 | 0 | return (0); |
946 | 0 | } |
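
Tracing the no-op rule from the comment above through a concrete sequence: when old_events lacks EV_READ, an uncommitted ADD followed by a DEL must cancel out rather than queue a delete the backend might reject. A toy trace of just the read_change field (EV_READ's value is from event2/event.h; the rest is illustrative):

    #include <stdio.h>

    #define EV_READ 0x02
    enum change { CH_NONE = 0, CH_ADD, CH_DEL };

    int main(void)
    {
        short old_events = 0;  /* no READ interest at the last flush */
        enum change read_change = CH_NONE;

        read_change = CH_ADD;  /* event_changelist_add_: queue an add */

        /* event_changelist_del_: old_events lacks EV_READ, so the
         * pending add is cancelled instead of becoming a delete. */
        read_change = (old_events & EV_READ) ? CH_DEL : CH_NONE;

        printf("pending read change: %d\n", read_change); /* 0: no-op */
        return 0;
    }
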
947 | | |
948 | | /* Helper for evmap_check_integrity_: verify that all of the events pending on |
949 | | * a given fd are set up correctly, and that the nread and nwrite counts on that |
950 | | * fd are correct. */ |
951 | | static int |
952 | | evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd, |
953 | | struct evmap_io *io_info, void *arg) |
954 | 0 | { |
955 | 0 | struct event *ev; |
956 | 0 | int n_read = 0, n_write = 0, n_close = 0; |
957 | | |
958 | | /* First, make sure the list itself isn't corrupt. Otherwise, |
959 | | * running LIST_FOREACH could be an exciting adventure. */ |
960 | 0 | EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next); |
961 | | |
962 | 0 | LIST_FOREACH(ev, &io_info->events, ev_io_next) { |
963 | 0 | EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); |
964 | 0 | EVUTIL_ASSERT(ev->ev_fd == fd); |
965 | 0 | EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL)); |
966 | 0 | EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); |
967 | 0 | if (ev->ev_events & EV_READ) Branch (967:7): [True: 0, False: 0]
|
968 | 0 | ++n_read; |
969 | 0 | if (ev->ev_events & EV_WRITE) Branch (969:7): [True: 0, False: 0]
|
970 | 0 | ++n_write; |
971 | 0 | if (ev->ev_events & EV_CLOSED) Branch (971:7): [True: 0, False: 0]
|
972 | 0 | ++n_close; |
973 | 0 | } |
974 | | |
975 | 0 | EVUTIL_ASSERT(n_read == io_info->nread); |
976 | 0 | EVUTIL_ASSERT(n_write == io_info->nwrite); |
977 | 0 | EVUTIL_ASSERT(n_close == io_info->nclose); |
978 | | |
979 | 0 | return 0; |
980 | 0 | } |
981 | | |
982 | | /* Helper for evmap_check_integrity_: verify that all of the events pending |
983 | | * on a given signal are set up correctly. */ |
984 | | static int |
985 | | evmap_signal_check_integrity_fn(struct event_base *base, |
986 | | int signum, struct evmap_signal *sig_info, void *arg) |
987 | 0 | { |
988 | 0 | struct event *ev; |
989 | | /* First, make sure the list itself isn't corrupt. */ |
990 | 0 | EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next); |
991 | | |
992 | 0 | LIST_FOREACH(ev, &sig_info->events, ev_signal_next) { |
993 | 0 | EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); |
994 | 0 | EVUTIL_ASSERT(ev->ev_fd == signum); |
995 | 0 | EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL)); |
996 | 0 | EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); |
997 | 0 | } |
998 | 0 | return 0; |
999 | 0 | } |
1000 | | |
1001 | | void |
1002 | | evmap_check_integrity_(struct event_base *base) |
1003 | 0 | { |
1004 | 0 | evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL); |
1005 | 0 | evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL); |
1006 | |
1007 | 0 | if (base->evsel->add == event_changelist_add_) Branch (1007:6): [True: 0, False: 0]
|
1008 | 0 | event_changelist_assert_ok(base); |
1009 | 0 | } |
1010 | | |
1011 | | /* Helper type for evmap_foreach_event_: Bundles a function to call on every |
1012 | | * event, and the user-provided void* to use as its third argument. */ |
1013 | | struct evmap_foreach_event_helper { |
1014 | | event_base_foreach_event_cb fn; |
1015 | | void *arg; |
1016 | | }; |
1017 | | |
1018 | | /* Helper for evmap_foreach_event_: calls a provided function on every event |
1019 | | * pending on a given fd. */ |
1020 | | static int |
1021 | | evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd, |
1022 | | struct evmap_io *io_info, void *arg) |
1023 | 0 | { |
1024 | 0 | struct evmap_foreach_event_helper *h = arg; |
1025 | 0 | struct event *ev; |
1026 | 0 | int r; |
1027 | 0 | LIST_FOREACH(ev, &io_info->events, ev_io_next) { |
1028 | 0 | if ((r = h->fn(base, ev, h->arg))) Branch (1028:7): [True: 0, False: 0]
|
1029 | 0 | return r; |
1030 | 0 | } |
1031 | 0 | return 0; |
1032 | 0 | } |
1033 | | |
1034 | | /* Helper for evmap_foreach_event_: calls a provided function on every event |
1035 | | * pending on a given signal. */ |
1036 | | static int |
1037 | | evmap_signal_foreach_event_fn(struct event_base *base, int signum, |
1038 | | struct evmap_signal *sig_info, void *arg) |
1039 | 0 | { |
1040 | 0 | struct event *ev; |
1041 | 0 | struct evmap_foreach_event_helper *h = arg; |
1042 | 0 | int r; |
1043 | 0 | LIST_FOREACH(ev, &sig_info->events, ev_signal_next) { |
1044 | 0 | if ((r = h->fn(base, ev, h->arg))) Branch (1044:7): [True: 0, False: 0]
|
1045 | 0 | return r; |
1046 | 0 | } |
1047 | 0 | return 0; |
1048 | 0 | } |
1049 | | |
1050 | | int |
1051 | | evmap_foreach_event_(struct event_base *base, |
1052 | | event_base_foreach_event_cb fn, void *arg) |
1053 | 0 | { |
1054 | 0 | struct evmap_foreach_event_helper h; |
1055 | 0 | int r; |
1056 | 0 | h.fn = fn; |
1057 | 0 | h.arg = arg; |
1058 | 0 | if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h))) Branch (1058:6): [True: 0, False: 0]
|
1059 | 0 | return r; |
1060 | 0 | return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h); |
1061 | 0 | } |
1062 | | |