Coverage Report

Created: 2025-06-10 13:21

/bitcoin/depends/work/build/x86_64-pc-linux-gnu/libevent/2.1.12-stable-7656baec08e/event.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 * 1. Redistributions of source code must retain the above copyright
9
 *    notice, this list of conditions and the following disclaimer.
10
 * 2. Redistributions in binary form must reproduce the above copyright
11
 *    notice, this list of conditions and the following disclaimer in the
12
 *    documentation and/or other materials provided with the distribution.
13
 * 3. The name of the author may not be used to endorse or promote products
14
 *    derived from this software without specific prior written permission.
15
 *
16
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 */
27
#include "event2/event-config.h"
28
#include "evconfig-private.h"
29
30
#ifdef _WIN32
31
#include <winsock2.h>
32
#define WIN32_LEAN_AND_MEAN
33
#include <windows.h>
34
#undef WIN32_LEAN_AND_MEAN
35
#endif
36
#include <sys/types.h>
37
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38
#include <sys/time.h>
39
#endif
40
#include <sys/queue.h>
41
#ifdef EVENT__HAVE_SYS_SOCKET_H
42
#include <sys/socket.h>
43
#endif
44
#include <stdio.h>
45
#include <stdlib.h>
46
#ifdef EVENT__HAVE_UNISTD_H
47
#include <unistd.h>
48
#endif
49
#include <ctype.h>
50
#include <errno.h>
51
#include <signal.h>
52
#include <string.h>
53
#include <time.h>
54
#include <limits.h>
55
#ifdef EVENT__HAVE_FCNTL_H
56
#include <fcntl.h>
57
#endif
58
59
#include "event2/event.h"
60
#include "event2/event_struct.h"
61
#include "event2/event_compat.h"
62
#include "event-internal.h"
63
#include "defer-internal.h"
64
#include "evthread-internal.h"
65
#include "event2/thread.h"
66
#include "event2/util.h"
67
#include "log-internal.h"
68
#include "evmap-internal.h"
69
#include "iocp-internal.h"
70
#include "changelist-internal.h"
71
#define HT_NO_CACHE_HASH_VALUES
72
#include "ht-internal.h"
73
#include "util-internal.h"
74
75
76
#ifdef EVENT__HAVE_WORKING_KQUEUE
77
#include "kqueue-internal.h"
78
#endif
79
80
#ifdef EVENT__HAVE_EVENT_PORTS
81
extern const struct eventop evportops;
82
#endif
83
#ifdef EVENT__HAVE_SELECT
84
extern const struct eventop selectops;
85
#endif
86
#ifdef EVENT__HAVE_POLL
87
extern const struct eventop pollops;
88
#endif
89
#ifdef EVENT__HAVE_EPOLL
90
extern const struct eventop epollops;
91
#endif
92
#ifdef EVENT__HAVE_WORKING_KQUEUE
93
extern const struct eventop kqops;
94
#endif
95
#ifdef EVENT__HAVE_DEVPOLL
96
extern const struct eventop devpollops;
97
#endif
98
#ifdef _WIN32
99
extern const struct eventop win32ops;
100
#endif
101
102
/* Array of backends in order of preference. */
103
static const struct eventop *eventops[] = {
104
#ifdef EVENT__HAVE_EVENT_PORTS
105
  &evportops,
106
#endif
107
#ifdef EVENT__HAVE_WORKING_KQUEUE
108
  &kqops,
109
#endif
110
#ifdef EVENT__HAVE_EPOLL
111
  &epollops,
112
#endif
113
#ifdef EVENT__HAVE_DEVPOLL
114
  &devpollops,
115
#endif
116
#ifdef EVENT__HAVE_POLL
117
  &pollops,
118
#endif
119
#ifdef EVENT__HAVE_SELECT
120
  &selectops,
121
#endif
122
#ifdef _WIN32
123
  &win32ops,
124
#endif
125
  NULL
126
};
127
128
/* Global state; deprecated */
129
EVENT2_EXPORT_SYMBOL
130
struct event_base *event_global_current_base_ = NULL;
131
11.0k
#define current_base event_global_current_base_
132
133
/* Global state */
134
135
static void *event_self_cbarg_ptr_ = NULL;
136
137
/* Prototypes */
138
static void event_queue_insert_active(struct event_base *, struct event_callback *);
139
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
140
static void event_queue_insert_timeout(struct event_base *, struct event *);
141
static void event_queue_insert_inserted(struct event_base *, struct event *);
142
static void event_queue_remove_active(struct event_base *, struct event_callback *);
143
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
144
static void event_queue_remove_timeout(struct event_base *, struct event *);
145
static void event_queue_remove_inserted(struct event_base *, struct event *);
146
static void event_queue_make_later_events_active(struct event_base *base);
147
148
static int evthread_make_base_notifiable_nolock_(struct event_base *base);
149
static int event_del_(struct event *ev, int blocking);
150
151
#ifdef USE_REINSERT_TIMEOUT
152
/* This code seems buggy; only turn it on if we find out what the trouble is. */
153
static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
154
#endif
155
156
static int  event_haveevents(struct event_base *);
157
158
static int  event_process_active(struct event_base *);
159
160
static int  timeout_next(struct event_base *, struct timeval **);
161
static void timeout_process(struct event_base *);
162
163
static inline void  event_signal_closure(struct event_base *, struct event *ev);
164
static inline void  event_persist_closure(struct event_base *, struct event *ev);
165
166
static int  evthread_notify_base(struct event_base *base);
167
168
static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
169
    struct event *ev);
170
171
#ifndef EVENT__DISABLE_DEBUG_MODE
172
/* These functions implement a hashtable of which 'struct event *' structures
173
 * have been setup or added.  We don't want to trust the content of the struct
174
 * event itself, since we're trying to work through cases where an event gets
175
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
176
 */
177
178
struct event_debug_entry {
179
  HT_ENTRY(event_debug_entry) node;
180
  const struct event *ptr;
181
  unsigned added : 1;
182
};
183
184
static inline unsigned
185
hash_debug_entry(const struct event_debug_entry *e)
186
0
{
187
  /* We need to do this silliness to convince compilers that we
188
   * honestly mean to cast e->ptr to an integer, and discard any
189
   * part of it that doesn't fit in an unsigned.
190
   */
191
0
  unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
192
  /* Our hashtable implementation is pretty sensitive to low bits,
193
   * and every struct event is over 64 bytes in size, so we can
194
   * just say >>6. */
195
0
  return (u >> 6);
196
0
}
197
198
static inline int
199
eq_debug_entry(const struct event_debug_entry *a,
200
    const struct event_debug_entry *b)
201
0
{
202
0
  return a->ptr == b->ptr;
203
0
}
204
205
int event_debug_mode_on_ = 0;
206
207
208
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
209
/**
210
 * @brief debug mode variable which is set for any function/structure that needs
211
 *        to be shared across threads (if thread support is enabled).
212
 *
213
 *        When and if evthreads are initialized, this variable will be evaluated,
214
 *        and if set to something other than zero, this means the evthread setup 
215
 *        functions were called out of order.
216
 *
217
 *        See: "Locks and threading" in the documentation.
218
 */
219
int event_debug_created_threadable_ctx_ = 0;
220
#endif
221
222
/* Set if it's too late to enable event_debug_mode. */
223
static int event_debug_mode_too_late = 0;
224
#ifndef EVENT__DISABLE_THREAD_SUPPORT
225
static void *event_debug_map_lock_ = NULL;
226
#endif
227
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
228
  HT_INITIALIZER();
229
230
0
HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
Unexecuted instantiation: event.c:event_debug_map_HT_INIT
Unexecuted instantiation: event.c:event_debug_map_HT_START
Unexecuted instantiation: event.c:event_debug_map_HT_NEXT_RMV
Unexecuted instantiation: event.c:event_debug_map_HT_FIND
Unexecuted instantiation: event.c:event_debug_map_HT_FIND_P_
Unexecuted instantiation: event.c:event_debug_map_HT_REMOVE
231
    eq_debug_entry)
232
0
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
233
0
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
234
235
/* record that ev is now setup (that is, ready for an add) */
236
static void event_debug_note_setup_(const struct event *ev)
237
11.8M
{
238
11.8M
  struct event_debug_entry *dent, find;
239
240
11.8M
  if (!event_debug_mode_on_)
  Branch (240:6): [True: 11.8M, False: 0]
241
11.8M
    goto out;
242
243
0
  find.ptr = ev;
244
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
245
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
246
0
  if (dent) {
  Branch (246:6): [True: 0, False: 0]
247
0
    dent->added = 0;
248
0
  } else {
249
0
    dent = mm_malloc(sizeof(*dent));
250
0
    if (!dent)
  Branch (250:7): [True: 0, False: 0]
251
0
      event_err(1,
252
0
          "Out of memory in debugging code");
253
0
    dent->ptr = ev;
254
0
    dent->added = 0;
255
0
    HT_INSERT(event_debug_map, &global_debug_map, dent);
256
0
  }
257
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
258
259
11.8M
out:
260
11.8M
  event_debug_mode_too_late = 1;
261
11.8M
}
262
/* record that ev is no longer setup */
263
static void event_debug_note_teardown_(const struct event *ev)
264
2.39M
{
265
2.39M
  struct event_debug_entry *dent, find;
266
267
2.39M
  if (!event_debug_mode_on_)
  Branch (267:6): [True: 2.39M, False: 0]
268
2.39M
    goto out;
269
270
0
  find.ptr = ev;
271
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
272
0
  dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
273
0
  if (dent)
  Branch (273:6): [True: 0, False: 0]
274
0
    mm_free(dent);
275
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
276
277
2.39M
out:
278
2.39M
  event_debug_mode_too_late = 1;
279
2.39M
}
280
/* Macro: record that ev is now added */
281
static void event_debug_note_add_(const struct event *ev)
282
23.5M
{
283
23.5M
  struct event_debug_entry *dent,find;
284
285
23.5M
  if (!event_debug_mode_on_)
  Branch (285:6): [True: 23.5M, False: 0]
286
23.5M
    goto out;
287
288
0
  find.ptr = ev;
289
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
290
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
291
0
  if (dent) {
  Branch (291:6): [True: 0, False: 0]
292
0
    dent->added = 1;
293
0
  } else {
294
0
    event_errx(EVENT_ERR_ABORT_,
295
0
        "%s: noting an add on a non-setup event %p"
296
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
297
0
        ", flags: 0x%x)",
298
0
        __func__, ev, ev->ev_events,
299
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
300
0
  }
301
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
302
303
23.5M
out:
304
23.5M
  event_debug_mode_too_late = 1;
305
23.5M
}
306
/* record that ev is no longer added */
307
static void event_debug_note_del_(const struct event *ev)
308
35.3M
{
309
35.3M
  struct event_debug_entry *dent, find;
310
311
35.3M
  if (!event_debug_mode_on_)
  Branch (311:6): [True: 35.3M, False: 0]
312
35.3M
    goto out;
313
314
0
  find.ptr = ev;
315
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
316
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
317
0
  if (dent) {
  Branch (317:6): [True: 0, False: 0]
318
0
    dent->added = 0;
319
0
  } else {
320
0
    event_errx(EVENT_ERR_ABORT_,
321
0
        "%s: noting a del on a non-setup event %p"
322
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
323
0
        ", flags: 0x%x)",
324
0
        __func__, ev, ev->ev_events,
325
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
326
0
  }
327
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
328
329
35.3M
out:
330
35.3M
  event_debug_mode_too_late = 1;
331
35.3M
}
332
/* assert that ev is setup (i.e., okay to add or inspect) */
333
static void event_debug_assert_is_setup_(const struct event *ev)
334
35.4M
{
335
35.4M
  struct event_debug_entry *dent, find;
336
337
35.4M
  if (!event_debug_mode_on_)
  Branch (337:6): [True: 35.4M, False: 0]
338
35.4M
    return;
339
340
0
  find.ptr = ev;
341
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
342
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
343
0
  if (!dent) {
  Branch (343:6): [True: 0, False: 0]
344
0
    event_errx(EVENT_ERR_ABORT_,
345
0
        "%s called on a non-initialized event %p"
346
0
        " (events: 0x%x, fd: "EV_SOCK_FMT
347
0
        ", flags: 0x%x)",
348
0
        __func__, ev, ev->ev_events,
349
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
350
0
  }
351
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
352
0
}
353
/* assert that ev is not added (i.e., okay to tear down or set up again) */
354
static void event_debug_assert_not_added_(const struct event *ev)
355
11.8M
{
356
11.8M
  struct event_debug_entry *dent, find;
357
358
11.8M
  if (!event_debug_mode_on_)
  Branch (358:6): [True: 11.8M, False: 0]
359
11.8M
    return;
360
361
0
  find.ptr = ev;
362
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
363
0
  dent = HT_FIND(event_debug_map, &global_debug_map, &find);
364
0
  if (dent && dent->added) {
  Branch (364:6): [True: 0, False: 0]
  Branch (364:14): [True: 0, False: 0]
365
0
    event_errx(EVENT_ERR_ABORT_,
366
0
        "%s called on an already added event %p"
367
0
        " (events: 0x%x, fd: "EV_SOCK_FMT", "
368
0
        "flags: 0x%x)",
369
0
        __func__, ev, ev->ev_events,
370
0
        EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
371
0
  }
372
0
  EVLOCK_UNLOCK(event_debug_map_lock_, 0);
373
0
}
374
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
375
11.8M
{
376
11.8M
  if (!event_debug_mode_on_)
  Branch (376:6): [True: 11.8M, False: 0]
377
11.8M
    return;
378
0
  if (fd < 0)
  Branch (378:6): [True: 0, False: 0]
379
0
    return;
380
381
0
#ifndef _WIN32
382
0
  {
383
0
    int flags;
384
0
    if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
  Branch (384:7): [True: 0, False: 0]
385
0
      EVUTIL_ASSERT(flags & O_NONBLOCK);
386
0
    }
387
0
  }
388
0
#endif
389
0
}
390
#else
391
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
392
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
393
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
394
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
395
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
396
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
397
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
398
#endif
399
400
#define EVENT_BASE_ASSERT_LOCKED(base)    \
401
228M
  EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
402
403
/* How often (in seconds) do we check for changes in wall clock time relative
404
 * to monotonic time?  Set this to -1 for 'never.' */
405
11.8M
#define CLOCK_SYNC_INTERVAL 5
406
407
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
408
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
409
 * clock_gettime or gettimeofday as appropriate to find out the right time.
410
 * Return 0 on success, -1 on failure.
411
 */
412
static int
413
gettime(struct event_base *base, struct timeval *tp)
414
51.8M
{
415
51.8M
  EVENT_BASE_ASSERT_LOCKED(base);
416
417
51.8M
  if (base->tv_cache.tv_sec) {
  Branch (417:6): [True: 40.0M, False: 11.8M]
418
40.0M
    *tp = base->tv_cache;
419
40.0M
    return (0);
420
40.0M
  }
421
422
11.8M
  if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
  Branch (422:6): [True: 0, False: 11.8M]
423
0
    return -1;
424
0
  }
425
426
11.8M
  if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
  Branch (426:6): [True: 11.0k, False: 11.7M]
427
11.8M
      < tp->tv_sec) {
428
11.0k
    struct timeval tv;
429
11.0k
    evutil_gettimeofday(&tv,NULL);
430
11.0k
    evutil_timersub(&tv, tp, &base->tv_clock_diff);
  Branch (430:3): [True: 10.5k, False: 545]
  Branch (430:3): [Folded - Ignored]
431
11.0k
    base->last_updated_clock_diff = tp->tv_sec;
432
11.0k
  }
433
434
11.8M
  return 0;
435
11.8M
}
436
437
int
438
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
439
0
{
440
0
  int r;
441
0
  if (!base) {
  Branch (441:6): [True: 0, False: 0]
442
0
    base = current_base;
443
0
    if (!current_base)
  Branch (443:7): [True: 0, False: 0]
444
0
      return evutil_gettimeofday(tv, NULL);
445
0
  }
446
447
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
448
0
  if (base->tv_cache.tv_sec == 0) {
  Branch (448:6): [True: 0, False: 0]
449
0
    r = evutil_gettimeofday(tv, NULL);
450
0
  } else {
451
0
    evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
  Branch (451:3): [True: 0, False: 0]
  Branch (451:3): [Folded - Ignored]
452
0
    r = 0;
453
0
  }
454
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
455
0
  return r;
456
0
}
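
As the gettime() comment above explains, the base caches the monotonic time during a loop pass and tracks a wall-clock offset in tv_clock_diff; event_base_gettimeofday_cached() returns that cached value adjusted back to wall-clock time. A hedged usage sketch follows — illustrative only, not part of event.c:

#include <event2/event.h>
#include <event2/util.h>

/* Timestamp a callback using the base's cached time instead of issuing a
 * gettimeofday() syscall for every event that fires in one loop pass. */
static void on_readable(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	/* Falls back to evutil_gettimeofday() when no time is cached. */
	event_base_gettimeofday_cached(base, &now);
	(void)fd; (void)what; (void)now;
}
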
457
458
/** Make 'base' have no current cached time. */
459
static inline void
460
clear_time_cache(struct event_base *base)
461
11.8M
{
462
11.8M
  base->tv_cache.tv_sec = 0;
463
11.8M
}
464
465
/** Replace the cached time in 'base' with the current time. */
466
static inline void
467
update_time_cache(struct event_base *base)
468
11.7M
{
469
11.7M
  base->tv_cache.tv_sec = 0;
470
11.7M
  if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
  Branch (470:6): [True: 11.7M, False: 0]
471
11.7M
      gettime(base, &base->tv_cache);
472
11.7M
}
473
474
int
475
event_base_update_cache_time(struct event_base *base)
476
0
{
477
478
0
  if (!base) {
  Branch (478:6): [True: 0, False: 0]
479
0
    base = current_base;
480
0
    if (!current_base)
  Branch (480:7): [True: 0, False: 0]
481
0
      return -1;
482
0
  }
483
484
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
485
0
  if (base->running_loop)
  Branch (485:6): [True: 0, False: 0]
486
0
    update_time_cache(base);
487
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
488
0
  return 0;
489
0
}
490
491
static inline struct event *
492
event_callback_to_event(struct event_callback *evcb)
493
21.2M
{
494
21.2M
  EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
495
21.2M
  return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
496
21.2M
}
497
498
static inline struct event_callback *
499
event_to_event_callback(struct event *ev)
500
70.7M
{
501
70.7M
  return &ev->ev_evcallback;
502
70.7M
}
503
504
struct event_base *
505
event_init(void)
506
0
{
507
0
  struct event_base *base = event_base_new_with_config(NULL);
508
509
0
  if (base == NULL) {
  Branch (509:6): [True: 0, False: 0]
510
0
    event_errx(1, "%s: Unable to construct event_base", __func__);
511
0
    return NULL;
512
0
  }
513
514
0
  current_base = base;
515
516
0
  return (base);
517
0
}
518
519
struct event_base *
520
event_base_new(void)
521
11.0k
{
522
11.0k
  struct event_base *base = NULL;
523
11.0k
  struct event_config *cfg = event_config_new();
524
11.0k
  if (cfg) {
  Branch (524:6): [True: 11.0k, False: 0]
525
11.0k
    base = event_base_new_with_config(cfg);
526
11.0k
    event_config_free(cfg);
527
11.0k
  }
528
11.0k
  return base;
529
11.0k
}
530
531
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
532
 * avoid. */
533
static int
534
event_config_is_avoided_method(const struct event_config *cfg,
535
    const char *method)
536
11.0k
{
537
11.0k
  struct event_config_entry *entry;
538
539
11.0k
  TAILQ_FOREACH(entry, &cfg->entries, next) {
540
0
    if (entry->avoid_method != NULL &&
  Branch (540:7): [True: 0, False: 0]
541
0
        strcmp(entry->avoid_method, method) == 0)
  Branch (541:7): [True: 0, False: 0]
542
0
      return (1);
543
0
  }
544
545
11.0k
  return (0);
546
11.0k
}
547
548
/** Return true iff 'method' is disabled according to the environment. */
549
static int
550
event_is_method_disabled(const char *name)
551
11.0k
{
552
11.0k
  char environment[64];
553
11.0k
  int i;
554
555
11.0k
  evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
556
66.5k
  for (i = 8; environment[i] != '\0'; ++i)
  Branch (556:14): [True: 55.4k, False: 11.0k]
557
55.4k
    environment[i] = EVUTIL_TOUPPER_(environment[i]);
558
  /* Note that evutil_getenv_() ignores the environment entirely if
559
   * we're setuid */
560
11.0k
  return (evutil_getenv_(environment) != NULL);
561
11.0k
}
562
563
int
564
event_base_get_features(const struct event_base *base)
565
0
{
566
0
  return base->evsel->features;
567
0
}
568
569
void
570
event_enable_debug_mode(void)
571
0
{
572
0
#ifndef EVENT__DISABLE_DEBUG_MODE
573
0
  if (event_debug_mode_on_)
  Branch (573:6): [True: 0, False: 0]
574
0
    event_errx(1, "%s was called twice!", __func__);
575
0
  if (event_debug_mode_too_late)
  Branch (575:6): [True: 0, False: 0]
576
0
    event_errx(1, "%s must be called *before* creating any events "
577
0
        "or event_bases",__func__);
578
579
0
  event_debug_mode_on_ = 1;
580
581
0
  HT_INIT(event_debug_map, &global_debug_map);
582
0
#endif
583
0
}
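
The two checks above mean event_enable_debug_mode() may only be called once and must run before any event or event_base is created. A minimal sketch of the intended call order — illustrative only, not part of event.c:

#include <event2/event.h>

int main(void)
{
	/* Must precede event_base_new()/event_new(); otherwise libevent
	 * aborts with "must be called *before* creating any events". */
	event_enable_debug_mode();

	struct event_base *base = event_base_new();
	if (!base)
		return 1;
	event_base_free(base);
	return 0;
}
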
584
585
void
586
event_disable_debug_mode(void)
587
0
{
588
0
#ifndef EVENT__DISABLE_DEBUG_MODE
589
0
  struct event_debug_entry **ent, *victim;
590
591
0
  EVLOCK_LOCK(event_debug_map_lock_, 0);
592
0
  for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
  Branch (592:59): [True: 0, False: 0]
593
0
    victim = *ent;
594
0
    ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
595
0
    mm_free(victim);
596
0
  }
597
0
  HT_CLEAR(event_debug_map, &global_debug_map);
598
0
  EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
599
600
0
  event_debug_mode_on_  = 0;
601
0
#endif
602
0
}
603
604
struct event_base *
605
event_base_new_with_config(const struct event_config *cfg)
606
11.0k
{
607
11.0k
  int i;
608
11.0k
  struct event_base *base;
609
11.0k
  int should_check_environment;
610
611
11.0k
#ifndef EVENT__DISABLE_DEBUG_MODE
612
11.0k
  event_debug_mode_too_late = 1;
613
11.0k
#endif
614
615
11.0k
  if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
  Branch (615:6): [True: 0, False: 11.0k]
616
0
    event_warn("%s: calloc", __func__);
617
0
    return NULL;
618
0
  }
619
620
11.0k
  if (cfg)
  Branch (620:6): [True: 11.0k, False: 0]
621
11.0k
    base->flags = cfg->flags;
622
623
11.0k
  should_check_environment =
624
11.0k
      !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
  Branch (624:8): [True: 11.0k, False: 0]
  Branch (624:15): [True: 0, False: 11.0k]
625
626
11.0k
  {
627
11.0k
    struct timeval tmp;
628
11.0k
    int precise_time =
629
11.0k
        cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
  Branch (629:7): [True: 11.0k, False: 0]
  Branch (629:14): [True: 0, False: 11.0k]
630
11.0k
    int flags;
631
11.0k
    if (should_check_environment && !precise_time) {
  Branch (631:7): [True: 11.0k, False: 0]
  Branch (631:35): [True: 11.0k, False: 0]
632
11.0k
      precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
633
11.0k
      if (precise_time) {
  Branch (633:8): [True: 0, False: 11.0k]
634
0
        base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
635
0
      }
636
11.0k
    }
637
11.0k
    flags = precise_time ? EV_MONOT_PRECISE : 0;
  Branch (637:11): [True: 0, False: 11.0k]
638
11.0k
    evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
639
640
11.0k
    gettime(base, &tmp);
641
11.0k
  }
642
643
11.0k
  min_heap_ctor_(&base->timeheap);
644
645
11.0k
  base->sig.ev_signal_pair[0] = -1;
646
11.0k
  base->sig.ev_signal_pair[1] = -1;
647
11.0k
  base->th_notify_fd[0] = -1;
648
11.0k
  base->th_notify_fd[1] = -1;
649
650
11.0k
  TAILQ_INIT(&base->active_later_queue);
651
652
11.0k
  evmap_io_initmap_(&base->io);
653
11.0k
  evmap_signal_initmap_(&base->sigmap);
654
11.0k
  event_changelist_init_(&base->changelist);
655
656
11.0k
  base->evbase = NULL;
657
658
11.0k
  if (cfg) {
  Branch (658:6): [True: 11.0k, False: 0]
659
11.0k
    memcpy(&base->max_dispatch_time,
660
11.0k
        &cfg->max_dispatch_interval, sizeof(struct timeval));
661
11.0k
    base->limit_callbacks_after_prio =
662
11.0k
        cfg->limit_callbacks_after_prio;
663
11.0k
  } else {
664
0
    base->max_dispatch_time.tv_sec = -1;
665
0
    base->limit_callbacks_after_prio = 1;
666
0
  }
667
11.0k
  if (cfg && cfg->max_dispatch_callbacks >= 0) {
  Branch (667:6): [True: 11.0k, False: 0]
  Branch (667:13): [True: 11.0k, False: 0]
668
11.0k
    base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
669
11.0k
  } else {
670
0
    base->max_dispatch_callbacks = INT_MAX;
671
0
  }
672
11.0k
  if (base->max_dispatch_callbacks == INT_MAX &&
  Branch (672:6): [True: 11.0k, False: 0]
673
11.0k
      base->max_dispatch_time.tv_sec == -1)
  Branch (673:6): [True: 11.0k, False: 0]
674
11.0k
    base->limit_callbacks_after_prio = INT_MAX;
675
676
22.1k
  for (i = 0; eventops[i] && !base->evbase; i++) {
  Branch (676:14): [True: 22.1k, False: 0]
  Branch (676:29): [True: 11.0k, False: 11.0k]
677
11.0k
    if (cfg != NULL) {
  Branch (677:7): [True: 11.0k, False: 0]
678
      /* determine if this backend should be avoided */
679
11.0k
      if (event_config_is_avoided_method(cfg,
  Branch (679:8): [True: 0, False: 11.0k]
680
11.0k
        eventops[i]->name))
681
0
        continue;
682
11.0k
      if ((eventops[i]->features & cfg->require_features)
  Branch (682:8): [True: 0, False: 11.0k]
683
11.0k
          != cfg->require_features)
684
0
        continue;
685
11.0k
    }
686
687
    /* also obey the environment variables */
688
11.0k
    if (should_check_environment &&
  Branch (688:7): [True: 11.0k, False: 0]
689
11.0k
        event_is_method_disabled(eventops[i]->name))
  Branch (689:7): [True: 0, False: 11.0k]
690
0
      continue;
691
692
11.0k
    base->evsel = eventops[i];
693
694
11.0k
    base->evbase = base->evsel->init(base);
695
11.0k
  }
696
697
11.0k
  if (base->evbase == NULL) {
  Branch (697:6): [True: 0, False: 11.0k]
698
0
    event_warnx("%s: no event mechanism available",
699
0
        __func__);
700
0
    base->evsel = NULL;
701
0
    event_base_free(base);
702
0
    return NULL;
703
0
  }
704
705
11.0k
  if (evutil_getenv_("EVENT_SHOW_METHOD"))
  Branch (705:6): [True: 0, False: 11.0k]
706
0
    event_msgx("libevent using: %s", base->evsel->name);
707
708
  /* allocate a single active event queue */
709
11.0k
  if (event_base_priority_init(base, 1) < 0) {
  Branch (709:6): [True: 0, False: 11.0k]
710
0
    event_base_free(base);
711
0
    return NULL;
712
0
  }
713
714
  /* prepare for threading */
715
716
11.0k
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
717
11.0k
  event_debug_created_threadable_ctx_ = 1;
718
11.0k
#endif
719
720
11.0k
#ifndef EVENT__DISABLE_THREAD_SUPPORT
721
11.0k
  if (EVTHREAD_LOCKING_ENABLED() &&
722
11.0k
      (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
  Branch (722:7): [True: 0, False: 11.0k]
  Branch (722:15): [True: 11.0k, False: 0]
723
11.0k
    int r;
724
11.0k
    EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
725
11.0k
    EVTHREAD_ALLOC_COND(base->current_event_cond);
726
11.0k
    r = evthread_make_base_notifiable(base);
727
11.0k
    if (r<0) {
  Branch (727:7): [True: 0, False: 11.0k]
728
0
      event_warnx("%s: Unable to make base notifiable.", __func__);
729
0
      event_base_free(base);
730
0
      return NULL;
731
0
    }
732
11.0k
  }
733
11.0k
#endif
734
735
#ifdef _WIN32
736
  if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
737
    event_base_start_iocp_(base, cfg->n_cpus_hint);
738
#endif
739
740
11.0k
  return (base);
741
11.0k
}
742
743
int
744
event_base_start_iocp_(struct event_base *base, int n_cpus)
745
0
{
746
#ifdef _WIN32
747
  if (base->iocp)
748
    return 0;
749
  base->iocp = event_iocp_port_launch_(n_cpus);
750
  if (!base->iocp) {
751
    event_warnx("%s: Couldn't launch IOCP", __func__);
752
    return -1;
753
  }
754
  return 0;
755
#else
756
0
  return -1;
757
0
#endif
758
0
}
759
760
void
761
event_base_stop_iocp_(struct event_base *base)
762
0
{
763
#ifdef _WIN32
764
  int rv;
765
766
  if (!base->iocp)
767
    return;
768
  rv = event_iocp_shutdown_(base->iocp, -1);
769
  EVUTIL_ASSERT(rv >= 0);
770
  base->iocp = NULL;
771
#endif
772
0
}
773
774
static int
775
event_base_cancel_single_callback_(struct event_base *base,
776
    struct event_callback *evcb,
777
    int run_finalizers)
778
8.93k
{
779
8.93k
  int result = 0;
780
781
8.93k
  if (evcb->evcb_flags & EVLIST_INIT) {
  Branch (781:6): [True: 8.93k, False: 0]
782
8.93k
    struct event *ev = event_callback_to_event(evcb);
783
8.93k
    if (!(ev->ev_flags & EVLIST_INTERNAL)) {
  Branch (783:7): [True: 8.93k, False: 0]
784
8.93k
      event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
785
8.93k
      result = 1;
786
8.93k
    }
787
8.93k
  } else {
788
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
789
0
    event_callback_cancel_nolock_(base, evcb, 1);
790
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
791
0
    result = 1;
792
0
  }
793
794
8.93k
  if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
  Branch (794:6): [True: 8.93k, False: 0]
  Branch (794:24): [True: 0, False: 8.93k]
795
0
    switch (evcb->evcb_closure) {
796
0
    case EV_CLOSURE_EVENT_FINALIZE:
  Branch (796:3): [True: 0, False: 0]
797
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
  Branch (797:3): [True: 0, False: 0]
798
0
      struct event *ev = event_callback_to_event(evcb);
799
0
      ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
800
0
      if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
  Branch (800:8): [True: 0, False: 0]
801
0
        mm_free(ev);
802
0
      break;
803
0
    }
804
0
    case EV_CLOSURE_CB_FINALIZE:
  Branch (804:3): [True: 0, False: 0]
805
0
      evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
806
0
      break;
807
0
    default:
  Branch (807:3): [True: 0, False: 0]
808
0
      break;
809
0
    }
810
0
  }
811
8.93k
  return result;
812
8.93k
}
813
814
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
815
20.0k
{
816
20.0k
  int deleted = 0, i;
817
818
40.0k
  for (i = 0; i < base->nactivequeues; ++i) {
  Branch (818:14): [True: 20.0k, False: 20.0k]
819
20.0k
    struct event_callback *evcb, *next;
820
28.9k
    for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
  Branch (820:52): [True: 8.93k, False: 20.0k]
821
8.93k
      next = TAILQ_NEXT(evcb, evcb_active_next);
822
8.93k
      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
823
8.93k
      evcb = next;
824
8.93k
    }
825
20.0k
  }
826
827
20.0k
  {
828
20.0k
    struct event_callback *evcb;
829
20.0k
    while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
  Branch (829:10): [True: 0, False: 20.0k]
830
0
      deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
831
0
    }
832
20.0k
  }
833
834
20.0k
  return deleted;
835
20.0k
}
836
837
static void
838
event_base_free_(struct event_base *base, int run_finalizers)
839
11.0k
{
840
11.0k
  int i, n_deleted=0;
841
11.0k
  struct event *ev;
842
  /* XXXX grab the lock? If there is contention when one thread frees
843
   * the base, then the contending thread will be very sad soon. */
844
845
  /* event_base_free(NULL) is how to free the current_base if we
846
   * made it with event_init and forgot to hold a reference to it. */
847
11.0k
  if (base == NULL && current_base)
  Branch (847:6): [True: 0, False: 11.0k]
848
0
    base = current_base;
849
  /* Don't actually free NULL. */
850
11.0k
  if (base == NULL) {
  Branch (850:6): [True: 0, False: 11.0k]
851
0
    event_warnx("%s: no base to free", __func__);
852
0
    return;
853
0
  }
854
  /* XXX(niels) - check for internal events first */
855
856
#ifdef _WIN32
857
  event_base_stop_iocp_(base);
858
#endif
859
860
  /* threading fds if we have them */
861
11.0k
  if (base->th_notify_fd[0] != -1) {
  Branch (861:6): [True: 11.0k, False: 0]
862
11.0k
    event_del(&base->th_notify);
863
11.0k
    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
864
11.0k
    if (base->th_notify_fd[1] != -1)
  Branch (864:7): [True: 0, False: 11.0k]
865
0
      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
866
11.0k
    base->th_notify_fd[0] = -1;
867
11.0k
    base->th_notify_fd[1] = -1;
868
11.0k
    event_debug_unassign(&base->th_notify);
869
11.0k
  }
870
871
  /* Delete all non-internal events. */
872
11.0k
  evmap_delete_all_(base);
873
874
11.0k
  while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
  Branch (874:9): [True: 0, False: 11.0k]
875
0
    event_del(ev);
876
0
    ++n_deleted;
877
0
  }
878
11.0k
  for (i = 0; i < base->n_common_timeouts; ++i) {
  Branch (878:14): [True: 0, False: 11.0k]
879
0
    struct common_timeout_list *ctl =
880
0
        base->common_timeout_queues[i];
881
0
    event_del(&ctl->timeout_event); /* Internal; doesn't count */
882
0
    event_debug_unassign(&ctl->timeout_event);
883
0
    for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
  Branch (883:40): [True: 0, False: 0]
884
0
      struct event *next = TAILQ_NEXT(ev,
885
0
          ev_timeout_pos.ev_next_with_common_timeout);
886
0
      if (!(ev->ev_flags & EVLIST_INTERNAL)) {
  Branch (886:8): [True: 0, False: 0]
887
0
        event_del(ev);
888
0
        ++n_deleted;
889
0
      }
890
0
      ev = next;
891
0
    }
892
0
    mm_free(ctl);
893
0
  }
894
11.0k
  if (base->common_timeout_queues)
  Branch (894:6): [True: 0, False: 11.0k]
895
0
    mm_free(base->common_timeout_queues);
896
897
20.0k
  for (;;) {
898
    /* A finalizer can register yet another finalizer from within a
899
     * finalizer, and if that finalizer ends up in active_later_queue we can
900
     * add it to activequeues, and we will have events in
901
     * activequeues after this function returns, which is not what we want
902
     * (we even have an assertion for this).
903
     *
904
     * A simple case is a bufferevent with an underlying bufferevent (i.e. filters).
905
     */
906
20.0k
    int i = event_base_free_queues_(base, run_finalizers);
907
20.0k
    event_debug(("%s: %d events freed", __func__, i));
908
20.0k
    if (!i) {
  Branch (908:7): [True: 11.0k, False: 8.93k]
909
11.0k
      break;
910
11.0k
    }
911
8.93k
    n_deleted += i;
912
8.93k
  }
913
914
11.0k
  if (n_deleted)
  Branch (914:6): [True: 8.93k, False: 2.15k]
915
8.93k
    event_debug(("%s: %d events were still set in base",
916
11.0k
      __func__, n_deleted));
917
918
20.0k
  while (LIST_FIRST(&base->once_events)) {
919
8.93k
    struct event_once *eonce = LIST_FIRST(&base->once_events);
920
8.93k
    LIST_REMOVE(eonce, next_once);
921
8.93k
    mm_free(eonce);
922
8.93k
  }
923
924
11.0k
  if (base->evsel != NULL && base->evsel->dealloc != NULL)
  Branch (924:6): [True: 11.0k, False: 0]
  Branch (924:29): [True: 11.0k, False: 0]
925
11.0k
    base->evsel->dealloc(base);
926
927
22.1k
  for (i = 0; i < base->nactivequeues; ++i)
  Branch (927:14): [True: 11.0k, False: 11.0k]
928
11.0k
    EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
929
930
11.0k
  EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
931
11.0k
  min_heap_dtor_(&base->timeheap);
932
933
11.0k
  mm_free(base->activequeues);
934
935
11.0k
  evmap_io_clear_(&base->io);
936
11.0k
  evmap_signal_clear_(&base->sigmap);
937
11.0k
  event_changelist_freemem_(&base->changelist);
938
939
11.0k
  EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
940
11.0k
  EVTHREAD_FREE_COND(base->current_event_cond);
941
942
  /* If we're freeing current_base, there won't be a current_base. */
943
11.0k
  if (base == current_base)
  Branch (943:6): [True: 0, False: 11.0k]
944
0
    current_base = NULL;
945
11.0k
  mm_free(base);
946
11.0k
}
947
948
void
949
event_base_free_nofinalize(struct event_base *base)
950
0
{
951
0
  event_base_free_(base, 0);
952
0
}
953
954
void
955
event_base_free(struct event_base *base)
956
11.0k
{
957
11.0k
  event_base_free_(base, 1);
958
11.0k
}
959
960
/* Fake eventop; used to disable the backend temporarily inside event_reinit
961
 * so that we can call event_del() on an event without telling the backend.
962
 */
963
static int
964
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
965
    short events, void *fdinfo)
966
0
{
967
0
  return 0;
968
0
}
969
const struct eventop nil_eventop = {
970
  "nil",
971
  NULL, /* init: unused. */
972
  NULL, /* add: unused. */
973
  nil_backend_del, /* del: used, so needs to be killed. */
974
  NULL, /* dispatch: unused. */
975
  NULL, /* dealloc: unused. */
976
  0, 0, 0
977
};
978
979
/* reinitialize the event base after a fork */
980
int
981
event_reinit(struct event_base *base)
982
0
{
983
0
  const struct eventop *evsel;
984
0
  int res = 0;
985
0
  int was_notifiable = 0;
986
0
  int had_signal_added = 0;
987
988
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
989
990
0
  evsel = base->evsel;
991
992
  /* check if this event mechanism requires reinit on the backend */
993
0
  if (evsel->need_reinit) {
  Branch (993:6): [True: 0, False: 0]
994
    /* We're going to call event_del() on our notify events (the
995
     * ones that tell about signals and wakeup events).  But we
996
     * don't actually want to tell the backend to change its
997
     * state, since it might still share some resource (a kqueue,
998
     * an epoll fd) with the parent process, and we don't want to
999
     * delete the fds from _that_ backend, we temporarily stub out
1000
     * the evsel with a replacement.
1001
     */
1002
0
    base->evsel = &nil_eventop;
1003
0
  }
1004
1005
  /* We need to re-create a new signal-notification fd and a new
1006
   * thread-notification fd.  Otherwise, we'll still share those with
1007
   * the parent process, which would make any notification sent to them
1008
   * get received by one or both of the event loops, more or less at
1009
   * random.
1010
   */
1011
0
  if (base->sig.ev_signal_added) {
  Branch (1011:6): [True: 0, False: 0]
1012
0
    event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1013
0
    event_debug_unassign(&base->sig.ev_signal);
1014
0
    memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1015
0
    had_signal_added = 1;
1016
0
    base->sig.ev_signal_added = 0;
1017
0
  }
1018
0
  if (base->sig.ev_signal_pair[0] != -1)
  Branch (1018:6): [True: 0, False: 0]
1019
0
    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1020
0
  if (base->sig.ev_signal_pair[1] != -1)
  Branch (1020:6): [True: 0, False: 0]
1021
0
    EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1022
0
  if (base->th_notify_fn != NULL) {
  Branch (1022:6): [True: 0, False: 0]
1023
0
    was_notifiable = 1;
1024
0
    base->th_notify_fn = NULL;
1025
0
  }
1026
0
  if (base->th_notify_fd[0] != -1) {
  Branch (1026:6): [True: 0, False: 0]
1027
0
    event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1028
0
    EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1029
0
    if (base->th_notify_fd[1] != -1)
  Branch (1029:7): [True: 0, False: 0]
1030
0
      EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1031
0
    base->th_notify_fd[0] = -1;
1032
0
    base->th_notify_fd[1] = -1;
1033
0
    event_debug_unassign(&base->th_notify);
1034
0
  }
1035
1036
  /* Replace the original evsel. */
1037
0
        base->evsel = evsel;
1038
1039
0
  if (evsel->need_reinit) {
  Branch (1039:6): [True: 0, False: 0]
1040
    /* Reconstruct the backend through brute-force, so that we do
1041
     * not share any structures with the parent process. For some
1042
     * backends, this is necessary: epoll and kqueue, for
1043
     * instance, have events associated with a kernel
1044
     * structure. If we didn't reinitialize, we'd share that
1045
     * structure with the parent process, and any changes made by
1046
     * the parent would affect our backend's behavior (and vice
1047
     * versa).
1048
     */
1049
0
    if (base->evsel->dealloc != NULL)
  Branch (1049:7): [True: 0, False: 0]
1050
0
      base->evsel->dealloc(base);
1051
0
    base->evbase = evsel->init(base);
1052
0
    if (base->evbase == NULL) {
  Branch (1052:7): [True: 0, False: 0]
1053
0
      event_errx(1,
1054
0
         "%s: could not reinitialize event mechanism",
1055
0
         __func__);
1056
0
      res = -1;
1057
0
      goto done;
1058
0
    }
1059
1060
    /* Empty out the changelist (if any): we are starting from a
1061
     * blank slate. */
1062
0
    event_changelist_freemem_(&base->changelist);
1063
1064
    /* Tell the event maps to re-inform the backend about all
1065
     * pending events. This will make the signal notification
1066
     * event get re-created if necessary. */
1067
0
    if (evmap_reinit_(base) < 0)
  Branch (1067:7): [True: 0, False: 0]
1068
0
      res = -1;
1069
0
  } else {
1070
0
    res = evsig_init_(base);
1071
0
    if (res == 0 && had_signal_added) {
  Branch (1071:7): [True: 0, False: 0]
  Branch (1071:19): [True: 0, False: 0]
1072
0
      res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1073
0
      if (res == 0)
  Branch (1073:8): [True: 0, False: 0]
1074
0
        base->sig.ev_signal_added = 1;
1075
0
    }
1076
0
  }
1077
1078
  /* If we were notifiable before, and nothing just exploded, become
1079
   * notifiable again. */
1080
0
  if (was_notifiable && res == 0)
  Branch (1080:6): [True: 0, False: 0]
  Branch (1080:24): [True: 0, False: 0]
1081
0
    res = evthread_make_base_notifiable_nolock_(base);
1082
1083
0
done:
1084
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1085
0
  return (res);
1086
0
}
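
event_reinit() exists for the fork() scenario the comments above describe: the child must stop sharing the parent's backend state (an epoll fd or kqueue, the signal socketpair, the thread-notify fds). A minimal sketch of the intended call pattern — illustrative only, not part of event.c:

#include <unistd.h>
#include <event2/event.h>

/* After fork(), the child rebuilds the backend before touching the base. */
static int fork_with_base(struct event_base *base)
{
	pid_t pid = fork();

	if (pid == -1)
		return -1;
	if (pid == 0) {
		if (event_reinit(base) == -1)
			return -1;	/* could not reinitialize the backend */
		/* the child may now add events and dispatch on 'base' */
	}
	return 0;
}
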
1087
1088
/* Get the monotonic time for this event_base's timer */
1089
int
1090
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1091
0
{
1092
0
  int rv = -1;
1093
1094
0
  if (base && tv) {
  Branch (1094:7): [True: 0, False: 0]
  Branch (1094:15): [True: 0, False: 0]
1095
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1096
0
    rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1097
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1098
0
  }
1099
1100
0
  return rv;
1101
0
}
1102
1103
const char **
1104
event_get_supported_methods(void)
1105
0
{
1106
0
  static const char **methods = NULL;
1107
0
  const struct eventop **method;
1108
0
  const char **tmp;
1109
0
  int i = 0, k;
1110
1111
  /* count all methods */
1112
0
  for (method = &eventops[0]; *method != NULL; ++method) {
  Branch (1112:30): [True: 0, False: 0]
1113
0
    ++i;
1114
0
  }
1115
1116
  /* allocate one more than we need for the NULL pointer */
1117
0
  tmp = mm_calloc((i + 1), sizeof(char *));
1118
0
  if (tmp == NULL)
  Branch (1118:6): [True: 0, False: 0]
1119
0
    return (NULL);
1120
1121
  /* populate the array with the supported methods */
1122
0
  for (k = 0, i = 0; eventops[k] != NULL; ++k) {
  Branch (1122:21): [True: 0, False: 0]
1123
0
    tmp[i++] = eventops[k]->name;
1124
0
  }
1125
0
  tmp[i] = NULL;
1126
1127
0
  if (methods != NULL)
  Branch (1127:6): [True: 0, False: 0]
1128
0
    mm_free((char**)methods);
1129
1130
0
  methods = tmp;
1131
1132
0
  return (methods);
1133
0
}
1134
1135
struct event_config *
1136
event_config_new(void)
1137
11.0k
{
1138
11.0k
  struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1139
1140
11.0k
  if (cfg == NULL)
  Branch (1140:6): [True: 0, False: 11.0k]
1141
0
    return (NULL);
1142
1143
11.0k
  TAILQ_INIT(&cfg->entries);
1144
11.0k
  cfg->max_dispatch_interval.tv_sec = -1;
1145
11.0k
  cfg->max_dispatch_callbacks = INT_MAX;
1146
11.0k
  cfg->limit_callbacks_after_prio = 1;
1147
1148
11.0k
  return (cfg);
1149
11.0k
}
1150
1151
static void
1152
event_config_entry_free(struct event_config_entry *entry)
1153
0
{
1154
0
  if (entry->avoid_method != NULL)
  Branch (1154:6): [True: 0, False: 0]
1155
0
    mm_free((char *)entry->avoid_method);
1156
0
  mm_free(entry);
1157
0
}
1158
1159
void
1160
event_config_free(struct event_config *cfg)
1161
11.0k
{
1162
11.0k
  struct event_config_entry *entry;
1163
1164
11.0k
  while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
  Branch (1164:9): [True: 0, False: 11.0k]
1165
0
    TAILQ_REMOVE(&cfg->entries, entry, next);
1166
0
    event_config_entry_free(entry);
1167
0
  }
1168
11.0k
  mm_free(cfg);
1169
11.0k
}
1170
1171
int
1172
event_config_set_flag(struct event_config *cfg, int flag)
1173
0
{
1174
0
  if (!cfg)
  Branch (1174:6): [True: 0, False: 0]
1175
0
    return -1;
1176
0
  cfg->flags |= flag;
1177
0
  return 0;
1178
0
}
1179
1180
int
1181
event_config_avoid_method(struct event_config *cfg, const char *method)
1182
0
{
1183
0
  struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1184
0
  if (entry == NULL)
  Branch (1184:6): [True: 0, False: 0]
1185
0
    return (-1);
1186
1187
0
  if ((entry->avoid_method = mm_strdup(method)) == NULL) {
  Branch (1187:6): [True: 0, False: 0]
1188
0
    mm_free(entry);
1189
0
    return (-1);
1190
0
  }
1191
1192
0
  TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1193
1194
0
  return (0);
1195
0
}
1196
1197
int
1198
event_config_require_features(struct event_config *cfg,
1199
    int features)
1200
0
{
1201
0
  if (!cfg)
  Branch (1201:6): [True: 0, False: 0]
1202
0
    return (-1);
1203
0
  cfg->require_features = features;
1204
0
  return (0);
1205
0
}
1206
1207
int
1208
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1209
0
{
1210
0
  if (!cfg)
  Branch (1210:6): [True: 0, False: 0]
1211
0
    return (-1);
1212
0
  cfg->n_cpus_hint = cpus;
1213
0
  return (0);
1214
0
}
1215
1216
int
1217
event_config_set_max_dispatch_interval(struct event_config *cfg,
1218
    const struct timeval *max_interval, int max_callbacks, int min_priority)
1219
0
{
1220
0
  if (max_interval)
  Branch (1220:6): [True: 0, False: 0]
1221
0
    memcpy(&cfg->max_dispatch_interval, max_interval,
1222
0
        sizeof(struct timeval));
1223
0
  else
1224
0
    cfg->max_dispatch_interval.tv_sec = -1;
1225
0
  cfg->max_dispatch_callbacks =
1226
0
      max_callbacks >= 0 ? max_callbacks : INT_MAX;
  Branch (1226:6): [True: 0, False: 0]
1227
0
  if (min_priority < 0)
  Branch (1227:6): [True: 0, False: 0]
1228
0
    min_priority = 0;
1229
0
  cfg->limit_callbacks_after_prio = min_priority;
1230
0
  return (0);
1231
0
}
1232
1233
int
1234
event_priority_init(int npriorities)
1235
0
{
1236
0
  return event_base_priority_init(current_base, npriorities);
1237
0
}
1238
1239
int
1240
event_base_priority_init(struct event_base *base, int npriorities)
1241
11.0k
{
1242
11.0k
  int i, r;
1243
11.0k
  r = -1;
1244
1245
11.0k
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1246
1247
11.0k
  if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
  Branch (1247:34): [True: 0, False: 11.0k]
1248
11.0k
      || npriorities >= EVENT_MAX_PRIORITIES)
  Branch (1248:9): [True: 0, False: 11.0k]
1249
0
    goto err;
1250
1251
11.0k
  if (npriorities == base->nactivequeues)
  Branch (1251:6): [True: 0, False: 11.0k]
1252
0
    goto ok;
1253
1254
11.0k
  if (base->nactivequeues) {
  Branch (1254:6): [True: 0, False: 11.0k]
1255
0
    mm_free(base->activequeues);
1256
0
    base->nactivequeues = 0;
1257
0
  }
1258
1259
  /* Allocate our priority queues */
1260
11.0k
  base->activequeues = (struct evcallback_list *)
1261
11.0k
    mm_calloc(npriorities, sizeof(struct evcallback_list));
1262
11.0k
  if (base->activequeues == NULL) {
  Branch (1262:6): [True: 0, False: 11.0k]
1263
0
    event_warn("%s: calloc", __func__);
1264
0
    goto err;
1265
0
  }
1266
11.0k
  base->nactivequeues = npriorities;
1267
1268
22.1k
  for (i = 0; i < base->nactivequeues; ++i) {
  Branch (1268:14): [True: 11.0k, False: 11.0k]
1269
11.0k
    TAILQ_INIT(&base->activequeues[i]);
1270
11.0k
  }
1271
1272
11.0k
ok:
1273
11.0k
  r = 0;
1274
11.0k
err:
1275
11.0k
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1276
11.0k
  return (r);
1277
11.0k
}
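
A short usage sketch for the function above — illustrative only, not part of event.c: priorities are configured on the base before any callbacks become active, and each event is then assigned a priority smaller than npriorities (lower numbers run first).

#include <event2/event.h>

/* Give 'base' two priority levels and make 'urgent' the higher one (0). */
static int setup_two_priorities(struct event_base *base, struct event *urgent)
{
	if (event_base_priority_init(base, 2) == -1)
		return -1;	/* fails if callbacks are already active */
	return event_priority_set(urgent, 0);
}
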
1278
1279
int
1280
event_base_get_npriorities(struct event_base *base)
1281
2.35M
{
1282
1283
2.35M
  int n;
1284
2.35M
  if (base == NULL)
  Branch (1284:6): [True: 0, False: 2.35M]
1285
0
    base = current_base;
1286
1287
2.35M
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1288
2.35M
  n = base->nactivequeues;
1289
2.35M
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1290
2.35M
  return (n);
1291
2.35M
}
1292
1293
int
1294
event_base_get_num_events(struct event_base *base, unsigned int type)
1295
0
{
1296
0
  int r = 0;
1297
1298
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1299
1300
0
  if (type & EVENT_BASE_COUNT_ACTIVE)
  Branch (1300:6): [True: 0, False: 0]
1301
0
    r += base->event_count_active;
1302
1303
0
  if (type & EVENT_BASE_COUNT_VIRTUAL)
  Branch (1303:6): [True: 0, False: 0]
1304
0
    r += base->virtual_event_count;
1305
1306
0
  if (type & EVENT_BASE_COUNT_ADDED)
  Branch (1306:6): [True: 0, False: 0]
1307
0
    r += base->event_count;
1308
1309
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1310
1311
0
  return r;
1312
0
}
1313
1314
int
1315
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1316
0
{
1317
0
  int r = 0;
1318
1319
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1320
1321
0
  if (type & EVENT_BASE_COUNT_ACTIVE) {
  Branch (1321:6): [True: 0, False: 0]
1322
0
    r += base->event_count_active_max;
1323
0
    if (clear)
  Branch (1323:7): [True: 0, False: 0]
1324
0
      base->event_count_active_max = 0;
1325
0
  }
1326
1327
0
  if (type & EVENT_BASE_COUNT_VIRTUAL) {
  Branch (1327:6): [True: 0, False: 0]
1328
0
    r += base->virtual_event_count_max;
1329
0
    if (clear)
  Branch (1329:7): [True: 0, False: 0]
1330
0
      base->virtual_event_count_max = 0;
1331
0
  }
1332
1333
0
  if (type & EVENT_BASE_COUNT_ADDED) {
  Branch (1333:6): [True: 0, False: 0]
1334
0
    r += base->event_count_max;
1335
0
    if (clear)
  Branch (1335:7): [True: 0, False: 0]
1336
0
      base->event_count_max = 0;
1337
0
  }
1338
1339
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1340
1341
0
  return r;
1342
0
}
1343
1344
/* Returns true iff we're currently watching any events. */
1345
static int
1346
event_haveevents(struct event_base *base)
1347
21.2M
{
1348
  /* Caller must hold th_base_lock */
1349
21.2M
  return (base->virtual_event_count > 0 || base->event_count > 0);
  Branch (1349:10): [True: 0, False: 21.2M]
  Branch (1349:43): [True: 21.2M, False: 24.3k]
1350
21.2M
}
1351
1352
/* "closure" function called when processing active signal events */
1353
static inline void
1354
event_signal_closure(struct event_base *base, struct event *ev)
1355
0
{
1356
0
  short ncalls;
1357
0
  int should_break;
1358
1359
  /* Allows deletes to work */
1360
0
  ncalls = ev->ev_ncalls;
1361
0
  if (ncalls != 0)
  Branch (1361:6): [True: 0, False: 0]
1362
0
    ev->ev_pncalls = &ncalls;
1363
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1364
0
  while (ncalls) {
  Branch (1364:9): [True: 0, False: 0]
1365
0
    ncalls--;
1366
0
    ev->ev_ncalls = ncalls;
1367
0
    if (ncalls == 0)
  Branch (1367:7): [True: 0, False: 0]
1368
0
      ev->ev_pncalls = NULL;
1369
0
    (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1370
1371
0
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1372
0
    should_break = base->event_break;
1373
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1374
1375
0
    if (should_break) {
  Branch (1375:7): [True: 0, False: 0]
1376
0
      if (ncalls != 0)
  Branch (1376:8): [True: 0, False: 0]
1377
0
        ev->ev_pncalls = NULL;
1378
0
      return;
1379
0
    }
1380
0
  }
1381
0
}
1382
1383
/* Common timeouts are special timeouts that are handled as queues rather than
1384
 * in the minheap.  This is more efficient than the minheap if we happen to
1385
 * know that we're going to get several thousands of timeout events all with
1386
 * the same timeout value.
1387
 *
1388
 * Since all our timeout handling code assumes timevals can be copied,
1389
 * assigned, etc, we can't use "magic pointer" to encode these common
1390
 * timeouts.  Searching through a list to see if every timeout is common could
1391
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1392
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1393
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1394
 * of index into the event_base's array of common timeouts.
1395
 */
1396
1397
14.1M
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1398
0
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1399
0
#define COMMON_TIMEOUT_IDX_SHIFT 20
1400
63.6M
#define COMMON_TIMEOUT_MASK     0xf0000000
1401
63.6M
#define COMMON_TIMEOUT_MAGIC    0x50000000
1402
1403
#define COMMON_TIMEOUT_IDX(tv) \
1404
0
  (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
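
A worked example of the encoding described in the comment above — illustrative only, not part of event.c. The magic nibble sits in the top 4 bits of tv_usec, the queue index in the next 8, and the real microseconds in the low 20 (so the microseconds mask is assumed to be 0x000fffff):

#include <assert.h>

/* Decode a common-timeout tv_usec built as: magic | (index << 20) | usec. */
static void common_timeout_decode_example(void)
{
	long usec = 0x50000000		/* COMMON_TIMEOUT_MAGIC */
	    | (3L << 20)		/* queue index 3 */
	    | 250000L;			/* actual microseconds */

	assert((usec & 0xf0000000) == 0x50000000);	/* COMMON_TIMEOUT_MASK */
	assert(((usec & 0x0ff00000) >> 20) == 3);	/* COMMON_TIMEOUT_IDX */
	assert((usec & 0x000fffff) == 250000);		/* low 20 bits: usec */
}
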
1405
1406
/** Return true iff 'tv' is a common timeout in 'base' */
1407
static inline int
1408
is_common_timeout(const struct timeval *tv,
1409
    const struct event_base *base)
1410
63.6M
{
1411
63.6M
  int idx;
1412
63.6M
  if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
  Branch (1412:6): [True: 63.6M, False: 0]
1413
63.6M
    return 0;
1414
0
  idx = COMMON_TIMEOUT_IDX(tv);
1415
0
  return idx < base->n_common_timeouts;
1416
63.6M
}
1417
1418
/* True iff tv1 and tv2 have the same common-timeout index, or if neither
1419
 * one is a common timeout. */
1420
static inline int
1421
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1422
7.06M
{
1423
7.06M
  return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1424
7.06M
      (tv2->tv_usec & ~MICROSECONDS_MASK);
1425
7.06M
}
1426
1427
/** Requires that 'tv' is a common timeout.  Return the corresponding
1428
 * common_timeout_list. */
1429
static inline struct common_timeout_list *
1430
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1431
0
{
1432
0
  return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1433
0
}
1434
1435
#if 0
1436
static inline int
1437
common_timeout_ok(const struct timeval *tv,
1438
    struct event_base *base)
1439
{
1440
  const struct timeval *expect =
1441
      &get_common_timeout_list(base, tv)->duration;
1442
  return tv->tv_sec == expect->tv_sec &&
1443
      tv->tv_usec == expect->tv_usec;
1444
}
1445
#endif
1446
1447
/* Add the timeout for the first event in given common timeout list to the
1448
 * event_base's minheap. */
1449
static void
1450
common_timeout_schedule(struct common_timeout_list *ctl,
1451
    const struct timeval *now, struct event *head)
1452
0
{
1453
0
  struct timeval timeout = head->ev_timeout;
1454
0
  timeout.tv_usec &= MICROSECONDS_MASK;
1455
0
  event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1456
0
}
1457
1458
/* Callback: invoked when the timeout for a common timeout queue triggers.
1459
 * This means that (at least) the first event in that queue should be run,
1460
 * and the timeout should be rescheduled if there are more events. */
1461
static void
1462
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1463
0
{
1464
0
  struct timeval now;
1465
0
  struct common_timeout_list *ctl = arg;
1466
0
  struct event_base *base = ctl->base;
1467
0
  struct event *ev = NULL;
1468
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1469
0
  gettime(base, &now);
1470
0
  while (1) {
  Branch (1470:9): [Folded - Ignored]
1471
0
    ev = TAILQ_FIRST(&ctl->events);
1472
0
    if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
  Branch (1472:7): [True: 0, False: 0]
  Branch (1472:14): [True: 0, False: 0]
1473
0
        (ev->ev_timeout.tv_sec == now.tv_sec &&
  Branch (1473:8): [True: 0, False: 0]
1474
0
      (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
  Branch (1474:4): [True: 0, False: 0]
1475
0
      break;
1476
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1477
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
1478
0
  }
1479
0
  if (ev)
  Branch (1479:6): [True: 0, False: 0]
1480
0
    common_timeout_schedule(ctl, &now, ev);
1481
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1482
0
}
1483
1484
0
#define MAX_COMMON_TIMEOUTS 256
1485
1486
const struct timeval *
1487
event_base_init_common_timeout(struct event_base *base,
1488
    const struct timeval *duration)
1489
0
{
1490
0
  int i;
1491
0
  struct timeval tv;
1492
0
  const struct timeval *result=NULL;
1493
0
  struct common_timeout_list *new_ctl;
1494
1495
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1496
0
  if (duration->tv_usec > 1000000) {
  Branch (1496:6): [True: 0, False: 0]
1497
0
    memcpy(&tv, duration, sizeof(struct timeval));
1498
0
    if (is_common_timeout(duration, base))
  Branch (1498:7): [True: 0, False: 0]
1499
0
      tv.tv_usec &= MICROSECONDS_MASK;
1500
0
    tv.tv_sec += tv.tv_usec / 1000000;
1501
0
    tv.tv_usec %= 1000000;
1502
0
    duration = &tv;
1503
0
  }
1504
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
  Branch (1504:14): [True: 0, False: 0]
1505
0
    const struct common_timeout_list *ctl =
1506
0
        base->common_timeout_queues[i];
1507
0
    if (duration->tv_sec == ctl->duration.tv_sec &&
  Branch (1507:7): [True: 0, False: 0]
1508
0
        duration->tv_usec ==
  Branch (1508:7): [True: 0, False: 0]
1509
0
        (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1510
0
      EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1511
0
      result = &ctl->duration;
1512
0
      goto done;
1513
0
    }
1514
0
  }
1515
0
  if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
  Branch (1515:6): [True: 0, False: 0]
1516
0
    event_warnx("%s: Too many common timeouts already in use; "
1517
0
        "we only support %d per event_base", __func__,
1518
0
        MAX_COMMON_TIMEOUTS);
1519
0
    goto done;
1520
0
  }
1521
0
  if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
  Branch (1521:6): [True: 0, False: 0]
1522
0
    int n = base->n_common_timeouts < 16 ? 16 :
  Branch (1522:11): [True: 0, False: 0]
1523
0
        base->n_common_timeouts*2;
1524
0
    struct common_timeout_list **newqueues =
1525
0
        mm_realloc(base->common_timeout_queues,
1526
0
      n*sizeof(struct common_timeout_queue *));
1527
0
    if (!newqueues) {
  Branch (1527:7): [True: 0, False: 0]
1528
0
      event_warn("%s: realloc",__func__);
1529
0
      goto done;
1530
0
    }
1531
0
    base->n_common_timeouts_allocated = n;
1532
0
    base->common_timeout_queues = newqueues;
1533
0
  }
1534
0
  new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1535
0
  if (!new_ctl) {
  Branch (1535:6): [True: 0, False: 0]
1536
0
    event_warn("%s: calloc",__func__);
1537
0
    goto done;
1538
0
  }
1539
0
  TAILQ_INIT(&new_ctl->events);
1540
0
  new_ctl->duration.tv_sec = duration->tv_sec;
1541
0
  new_ctl->duration.tv_usec =
1542
0
      duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1543
0
      (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1544
0
  evtimer_assign(&new_ctl->timeout_event, base,
1545
0
      common_timeout_callback, new_ctl);
1546
0
  new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1547
0
  event_priority_set(&new_ctl->timeout_event, 0);
1548
0
  new_ctl->base = base;
1549
0
  base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1550
0
  result = &new_ctl->duration;
1551
1552
0
done:
1553
0
  if (result)
  Branch (1553:6): [True: 0, False: 0]
1554
0
    EVUTIL_ASSERT(is_common_timeout(result, base));
1555
1556
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1557
0
  return result;
1558
0
}
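A minimal caller-side sketch of the common-timeout facility above (not part of the instrumented file; the helper name add_many_shared_timeouts is illustrative): many timers sharing one duration are registered against the magic timeval returned by event_base_init_common_timeout(), so they queue on a single common_timeout_list instead of each occupying a min-heap slot.

#include <event2/event.h>

static void on_common_timeout(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what; (void)arg;   /* expiry handling would go here */
}

static void add_many_shared_timeouts(struct event_base *base, struct event **evs, int n)
{
  struct timeval ten_sec = { 10, 0 };
  /* The returned timeval carries COMMON_TIMEOUT_MAGIC and the queue index. */
  const struct timeval *common = event_base_init_common_timeout(base, &ten_sec);
  int i;

  if (!common)
    return;
  for (i = 0; i < n; i++) {
    evs[i] = evtimer_new(base, on_common_timeout, NULL);
    if (evs[i])
      evtimer_add(evs[i], common);
  }
}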
1559
1560
/* Closure function invoked when we're activating a persistent event. */
1561
static inline void
1562
event_persist_closure(struct event_base *base, struct event *ev)
1563
11.7M
{
1564
11.7M
  void (*evcb_callback)(evutil_socket_t, short, void *);
1565
1566
        // Other fields of *ev that must be stored before executing
1567
11.7M
        evutil_socket_t evcb_fd;
1568
11.7M
        short evcb_res;
1569
11.7M
        void *evcb_arg;
1570
1571
  /* reschedule the persistent event if we have a timeout. */
1572
11.7M
  if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
  Branch (1572:6): [True: 7.06M, False: 4.72M]
  Branch (1572:34): [True: 0, False: 4.72M]
1573
    /* If there was a timeout, we want it to run at an interval of
1574
     * ev_io_timeout after the last time it was _scheduled_ for,
1575
     * not ev_io_timeout after _now_.  If it fired for another
1576
     * reason, though, the timeout ought to start ticking _now_. */
1577
7.06M
    struct timeval run_at, relative_to, delay, now;
1578
7.06M
    ev_uint32_t usec_mask = 0;
1579
7.06M
    EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1580
7.06M
      &ev->ev_io_timeout));
1581
7.06M
    gettime(base, &now);
1582
7.06M
    if (is_common_timeout(&ev->ev_timeout, base)) {
  Branch (1582:7): [True: 0, False: 7.06M]
1583
0
      delay = ev->ev_io_timeout;
1584
0
      usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1585
0
      delay.tv_usec &= MICROSECONDS_MASK;
1586
0
      if (ev->ev_res & EV_TIMEOUT) {
  Branch (1586:8): [True: 0, False: 0]
1587
0
        relative_to = ev->ev_timeout;
1588
0
        relative_to.tv_usec &= MICROSECONDS_MASK;
1589
0
      } else {
1590
0
        relative_to = now;
1591
0
      }
1592
7.06M
    } else {
1593
7.06M
      delay = ev->ev_io_timeout;
1594
7.06M
      if (ev->ev_res & EV_TIMEOUT) {
  Branch (1594:8): [True: 0, False: 7.06M]
1595
0
        relative_to = ev->ev_timeout;
1596
7.06M
      } else {
1597
7.06M
        relative_to = now;
1598
7.06M
      }
1599
7.06M
    }
1600
7.06M
    evutil_timeradd(&relative_to, &delay, &run_at);
  Branch (1600:3): [True: 0, False: 7.06M]
  Branch (1600:3): [Folded - Ignored]
1601
7.06M
    if (evutil_timercmp(&run_at, &now, <)) {
1602
      /* Looks like we missed at least one invocation due to
1603
       * a clock jump, not running the event loop for a
1604
       * while, really slow callbacks, or
1605
       * something. Reschedule relative to now.
1606
       */
1607
0
      evutil_timeradd(&now, &delay, &run_at);
  Branch (1607:4): [True: 0, False: 0]
  Branch (1607:4): [Folded - Ignored]
1608
0
    }
1609
7.06M
    run_at.tv_usec |= usec_mask;
1610
7.06M
    event_add_nolock_(ev, &run_at, 1);
1611
7.06M
  }
1612
1613
  // Save our callback before we release the lock
1614
11.7M
  evcb_callback = ev->ev_callback;
1615
11.7M
        evcb_fd = ev->ev_fd;
1616
11.7M
        evcb_res = ev->ev_res;
1617
11.7M
        evcb_arg = ev->ev_arg;
1618
1619
  // Release the lock
1620
11.7M
  EVBASE_RELEASE_LOCK(base, th_base_lock);
1621
1622
  // Execute the callback
1623
11.7M
        (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1624
11.7M
}
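A short sketch of the caller-side pattern event_persist_closure() serves (illustrative, not from this file): an EV_PERSIST event added with a timeout keeps its interval in ev_io_timeout and is re-added automatically, so the callback never has to call event_add() itself.

#include <event2/event.h>

static void every_second(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what; (void)arg;   /* runs about once per second until deleted */
}

static struct event *start_ticker(struct event_base *base)
{
  struct timeval one_sec = { 1, 0 };
  struct event *tick = event_new(base, -1, EV_PERSIST, every_second, NULL);

  if (tick)
    event_add(tick, &one_sec);       /* interval is preserved across activations */
  return tick;
}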
1625
1626
/*
1627
  Helper for event_process_active to process all the events in a single queue,
1628
  releasing the lock as we go.  This function requires that the lock be held
1629
  when it's invoked.  Returns -1 if we get a signal or an event_break that
1630
  means we should stop processing any active events now.  Otherwise returns
1631
  the number of non-internal event_callbacks that we processed.
1632
*/
1633
static int
1634
event_process_active_single_queue(struct event_base *base,
1635
    struct evcallback_list *activeq,
1636
    int max_to_process, const struct timeval *endtime)
1637
11.7M
{
1638
11.7M
  struct event_callback *evcb;
1639
11.7M
  int count = 0;
1640
1641
11.7M
  EVUTIL_ASSERT(activeq != NULL);
1642
1643
28.3M
  for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
  Branch (1643:36): [True: 16.5M, False: 11.7M]
1644
16.5M
    struct event *ev=NULL;
1645
16.5M
    if (evcb->evcb_flags & EVLIST_INIT) {
  Branch (1645:7): [True: 16.5M, False: 0]
1646
16.5M
      ev = event_callback_to_event(evcb);
1647
1648
16.5M
      if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
  Branch (1648:8): [True: 14.1M, False: 2.36M]
  Branch (1648:38): [True: 0, False: 2.36M]
1649
14.1M
        event_queue_remove_active(base, evcb);
1650
2.36M
      else
1651
2.36M
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1652
16.5M
      event_debug((
1653
16.5M
          "event_process_active: event: %p, %s%s%scall %p",
1654
16.5M
          ev,
1655
16.5M
          ev->ev_res & EV_READ ? "EV_READ " : " ",
1656
16.5M
          ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1657
16.5M
          ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1658
16.5M
          ev->ev_callback));
1659
16.5M
    } else {
1660
0
      event_queue_remove_active(base, evcb);
1661
0
      event_debug(("event_process_active: event_callback %p, "
1662
0
        "closure %d, call %p",
1663
0
        evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1664
0
    }
1665
1666
16.5M
    if (!(evcb->evcb_flags & EVLIST_INTERNAL))
  Branch (1666:7): [True: 14.1M, False: 2.36M]
1667
14.1M
      ++count;
1668
1669
1670
16.5M
    base->current_event = evcb;
1671
16.5M
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1672
16.5M
    base->current_event_waiters = 0;
1673
16.5M
#endif
1674
1675
16.5M
    switch (evcb->evcb_closure) {
1676
0
    case EV_CLOSURE_EVENT_SIGNAL:
  Branch (1676:3): [True: 0, False: 16.5M]
1677
0
      EVUTIL_ASSERT(ev != NULL);
1678
0
      event_signal_closure(base, ev);
1679
0
      break;
1680
11.7M
    case EV_CLOSURE_EVENT_PERSIST:
  Branch (1680:3): [True: 11.7M, False: 4.71M]
1681
11.7M
      EVUTIL_ASSERT(ev != NULL);
1682
11.7M
      event_persist_closure(base, ev);
1683
11.7M
      break;
1684
2.36M
    case EV_CLOSURE_EVENT: {
  Branch (1684:3): [True: 2.36M, False: 14.1M]
1685
2.36M
      void (*evcb_callback)(evutil_socket_t, short, void *);
1686
2.36M
      short res;
1687
2.36M
      EVUTIL_ASSERT(ev != NULL);
1688
2.36M
      evcb_callback = *ev->ev_callback;
1689
2.36M
      res = ev->ev_res;
1690
2.36M
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1691
2.36M
      evcb_callback(ev->ev_fd, res, ev->ev_arg);
1692
2.36M
    }
1693
0
    break;
1694
0
    case EV_CLOSURE_CB_SELF: {
  Branch (1694:3): [True: 0, False: 16.5M]
1695
0
      void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1696
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1697
0
      evcb_selfcb(evcb, evcb->evcb_arg);
1698
0
    }
1699
0
    break;
1700
0
    case EV_CLOSURE_EVENT_FINALIZE:
  Branch (1700:3): [True: 0, False: 16.5M]
1701
0
    case EV_CLOSURE_EVENT_FINALIZE_FREE: {
  Branch (1701:3): [True: 0, False: 16.5M]
1702
0
      void (*evcb_evfinalize)(struct event *, void *);
1703
0
      int evcb_closure = evcb->evcb_closure;
1704
0
      EVUTIL_ASSERT(ev != NULL);
1705
0
      base->current_event = NULL;
1706
0
      evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1707
0
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1708
0
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1709
0
      event_debug_note_teardown_(ev);
1710
0
      evcb_evfinalize(ev, ev->ev_arg);
1711
0
      if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
  Branch (1711:8): [True: 0, False: 0]
1712
0
        mm_free(ev);
1713
0
    }
1714
0
    break;
1715
2.35M
    case EV_CLOSURE_CB_FINALIZE: {
  Branch (1715:3): [True: 2.35M, False: 14.1M]
1716
2.35M
      void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1717
2.35M
      base->current_event = NULL;
1718
2.35M
      EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1719
2.35M
      EVBASE_RELEASE_LOCK(base, th_base_lock);
1720
2.35M
      evcb_cbfinalize(evcb, evcb->evcb_arg);
1721
2.35M
    }
1722
0
    break;
1723
0
    default:
  Branch (1723:3): [True: 0, False: 16.5M]
1724
0
      EVUTIL_ASSERT(0);
1725
16.5M
    }
1726
1727
16.5M
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1728
16.5M
    base->current_event = NULL;
1729
16.5M
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1730
16.5M
    if (base->current_event_waiters) {
  Branch (1730:7): [True: 0, False: 16.5M]
1731
0
      base->current_event_waiters = 0;
1732
0
      EVTHREAD_COND_BROADCAST(base->current_event_cond);
1733
0
    }
1734
16.5M
#endif
1735
1736
16.5M
    if (base->event_break)
  Branch (1736:7): [True: 0, False: 16.5M]
1737
0
      return -1;
1738
16.5M
    if (count >= max_to_process)
  Branch (1738:7): [True: 0, False: 16.5M]
1739
0
      return count;
1740
16.5M
    if (count && endtime) {
  Branch (1740:7): [True: 16.5M, False: 10.0k]
  Branch (1740:16): [True: 0, False: 16.5M]
1741
0
      struct timeval now;
1742
0
      update_time_cache(base);
1743
0
      gettime(base, &now);
1744
0
      if (evutil_timercmp(&now, endtime, >=))
1745
0
        return count;
1746
0
    }
1747
16.5M
    if (base->event_continue)
  Branch (1747:7): [True: 0, False: 16.5M]
1748
0
      break;
1749
16.5M
  }
1750
11.7M
  return count;
1751
11.7M
}
1752
1753
/*
1754
 * Active events are stored in priority queues.  Lower priorities are always
1755
 * processed before higher priorities.  Low priority events can starve high
1756
 * priority ones.
1757
 */
1758
1759
static int
1760
event_process_active(struct event_base *base)
1761
11.7M
{
1762
  /* Caller must hold th_base_lock */
1763
11.7M
  struct evcallback_list *activeq = NULL;
1764
11.7M
  int i, c = 0;
1765
11.7M
  const struct timeval *endtime;
1766
11.7M
  struct timeval tv;
1767
11.7M
  const int maxcb = base->max_dispatch_callbacks;
1768
11.7M
  const int limit_after_prio = base->limit_callbacks_after_prio;
1769
11.7M
  if (base->max_dispatch_time.tv_sec >= 0) {
  Branch (1769:6): [True: 0, False: 11.7M]
1770
0
    update_time_cache(base);
1771
0
    gettime(base, &tv);
1772
0
    evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
  Branch (1772:3): [True: 0, False: 0]
  Branch (1772:3): [Folded - Ignored]
1773
0
    endtime = &tv;
1774
11.7M
  } else {
1775
11.7M
    endtime = NULL;
1776
11.7M
  }
1777
1778
11.8M
  for (i = 0; i < base->nactivequeues; ++i) {
  Branch (1778:14): [True: 11.7M, False: 8.93k]
1779
11.7M
    if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
  Branch (1779:7): [True: 11.7M, False: 0]
1780
11.7M
      base->event_running_priority = i;
1781
11.7M
      activeq = &base->activequeues[i];
1782
11.7M
      if (i < limit_after_prio)
  Branch (1782:8): [True: 11.7M, False: 0]
1783
11.7M
        c = event_process_active_single_queue(base, activeq,
1784
11.7M
            INT_MAX, NULL);
1785
0
      else
1786
0
        c = event_process_active_single_queue(base, activeq,
1787
0
            maxcb, endtime);
1788
11.7M
      if (c < 0) {
  Branch (1788:8): [True: 0, False: 11.7M]
1789
0
        goto done;
1790
11.7M
      } else if (c > 0)
  Branch (1790:15): [True: 11.7M, False: 8.93k]
1791
11.7M
        break; /* Processed a real event; do not
1792
          * consider lower-priority events */
1793
      /* If we get here, all of the events we processed
1794
       * were internal.  Continue. */
1795
11.7M
    }
1796
11.7M
  }
1797
1798
11.7M
done:
1799
11.7M
  base->event_running_priority = -1;
1800
1801
11.7M
  return c;
1802
11.7M
}
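The dispatch limits consulted above (max_dispatch_time, max_dispatch_callbacks, limit_callbacks_after_prio) are normally configured through the public event_config API. A hedged sketch, assuming libevent 2.1's event_config_set_max_dispatch_interval(); the helper name is illustrative:

#include <event2/event.h>

static struct event_base *make_base_with_limits(void)
{
  struct event_config *cfg = event_config_new();
  struct timeval ten_ms = { 0, 10000 };
  struct event_base *base = NULL;

  if (!cfg)
    return NULL;
  /* Re-check for higher-priority work every 10 ms or 16 callbacks, but only
   * for events of priority >= 1; priority 0 runs without limits. */
  event_config_set_max_dispatch_interval(cfg, &ten_ms, 16, 1);
  base = event_base_new_with_config(cfg);
  event_config_free(cfg);
  if (base)
    event_base_priority_init(base, 2);   /* two priority queues: 0 and 1 */
  return base;
}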
1803
1804
/*
1805
 * Wait continuously for events.  We exit only if no events are left.
1806
 */
1807
1808
int
1809
event_dispatch(void)
1810
0
{
1811
0
  return (event_loop(0));
1812
0
}
1813
1814
int
1815
event_base_dispatch(struct event_base *event_base)
1816
11.0k
{
1817
11.0k
  return (event_base_loop(event_base, 0));
1818
11.0k
}
1819
1820
const char *
1821
event_base_get_method(const struct event_base *base)
1822
0
{
1823
0
  EVUTIL_ASSERT(base);
1824
0
  return (base->evsel->name);
1825
0
}
1826
1827
/** Callback: used to implement event_base_loopexit by telling the event_base
1828
 * that it's time to exit its loop. */
1829
static void
1830
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1831
0
{
1832
0
  struct event_base *base = arg;
1833
0
  base->event_gotterm = 1;
1834
0
}
1835
1836
int
1837
event_loopexit(const struct timeval *tv)
1838
0
{
1839
0
  return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1840
0
        current_base, tv));
1841
0
}
1842
1843
int
1844
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1845
0
{
1846
0
  return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1847
0
        event_base, tv));
1848
0
}
1849
1850
int
1851
event_loopbreak(void)
1852
0
{
1853
0
  return (event_base_loopbreak(current_base));
1854
0
}
1855
1856
int
1857
event_base_loopbreak(struct event_base *event_base)
1858
0
{
1859
0
  int r = 0;
1860
0
  if (event_base == NULL)
  Branch (1860:6): [True: 0, False: 0]
1861
0
    return (-1);
1862
1863
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1864
0
  event_base->event_break = 1;
1865
1866
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1867
0
    r = evthread_notify_base(event_base);
1868
0
  } else {
1869
0
    r = (0);
1870
0
  }
1871
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1872
0
  return r;
1873
0
}
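A sketch of how the two stop requests differ in practice (illustrative helpers, not part of event.c): event_base_loopexit() lets the current round of callbacks finish, optionally after a delay, while event_base_loopbreak() returns as soon as the running callback does.

#include <event2/event.h>
#include <signal.h>

static void on_sigint(evutil_socket_t sig, short what, void *arg)
{
  struct event_base *base = arg;
  struct timeval grace = { 2, 0 };

  (void)sig; (void)what;
  event_base_loopexit(base, &grace);   /* or event_base_loopbreak(base) to stop at once */
}

static struct event *install_sigint(struct event_base *base)
{
  struct event *se = evsignal_new(base, SIGINT, on_sigint, base);

  if (se)
    event_add(se, NULL);
  return se;
}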
1874
1875
int
1876
event_base_loopcontinue(struct event_base *event_base)
1877
0
{
1878
0
  int r = 0;
1879
0
  if (event_base == NULL)
  Branch (1879:6): [True: 0, False: 0]
1880
0
    return (-1);
1881
1882
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1883
0
  event_base->event_continue = 1;
1884
1885
0
  if (EVBASE_NEED_NOTIFY(event_base)) {
1886
0
    r = evthread_notify_base(event_base);
1887
0
  } else {
1888
0
    r = (0);
1889
0
  }
1890
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1891
0
  return r;
1892
0
}
1893
1894
int
1895
event_base_got_break(struct event_base *event_base)
1896
0
{
1897
0
  int res;
1898
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1899
0
  res = event_base->event_break;
1900
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1901
0
  return res;
1902
0
}
1903
1904
int
1905
event_base_got_exit(struct event_base *event_base)
1906
0
{
1907
0
  int res;
1908
0
  EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1909
0
  res = event_base->event_gotterm;
1910
0
  EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1911
0
  return res;
1912
0
}
1913
1914
/* not thread safe */
1915
1916
int
1917
event_loop(int flags)
1918
0
{
1919
0
  return event_base_loop(current_base, flags);
1920
0
}
1921
1922
int
1923
event_base_loop(struct event_base *base, int flags)
1924
11.0k
{
1925
11.0k
  const struct eventop *evsel = base->evsel;
1926
11.0k
  struct timeval tv;
1927
11.0k
  struct timeval *tv_p;
1928
11.0k
  int res, done, retval = 0;
1929
1930
  /* Grab the lock.  We will release it inside evsel.dispatch, and again
1931
   * as we invoke user callbacks. */
1932
11.0k
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1933
1934
11.0k
  if (base->running_loop) {
  Branch (1934:6): [True: 0, False: 11.0k]
1935
0
    event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1936
0
        " can run on each event_base at once.", __func__);
1937
0
    EVBASE_RELEASE_LOCK(base, th_base_lock);
1938
0
    return -1;
1939
0
  }
1940
1941
11.0k
  base->running_loop = 1;
1942
1943
11.0k
  clear_time_cache(base);
1944
1945
11.0k
  if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
  Branch (1945:6): [True: 0, False: 11.0k]
  Branch (1945:35): [True: 0, False: 0]
1946
0
    evsig_set_base_(base);
1947
1948
11.0k
  done = 0;
1949
1950
11.0k
#ifndef EVENT__DISABLE_THREAD_SUPPORT
1951
11.0k
  base->th_owner_id = EVTHREAD_GET_ID();
1952
11.0k
#endif
1953
1954
11.0k
  base->event_gotterm = base->event_break = 0;
1955
1956
11.8M
  while (!done) {
  Branch (1956:9): [True: 11.8M, False: 0]
1957
11.8M
    base->event_continue = 0;
1958
11.8M
    base->n_deferreds_queued = 0;
1959
1960
    /* Terminate the loop if we have been asked to */
1961
11.8M
    if (base->event_gotterm) {
  Branch (1961:7): [True: 0, False: 11.8M]
1962
0
      break;
1963
0
    }
1964
1965
11.8M
    if (base->event_break) {
  Branch (1965:7): [True: 0, False: 11.8M]
1966
0
      break;
1967
0
    }
1968
1969
11.8M
    tv_p = &tv;
1970
11.8M
    if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
  Branch (1970:7): [True: 11.8M, False: 0]
  Branch (1970:36): [True: 11.8M, False: 0]
1971
11.8M
      timeout_next(base, &tv_p);
1972
11.8M
    } else {
1973
      /*
1974
       * if we have active events, we just poll new events
1975
       * without waiting.
1976
       */
1977
0
      evutil_timerclear(&tv);
1978
0
    }
1979
1980
    /* If we have no events, we just exit */
1981
11.8M
    if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
  Branch (1981:7): [True: 11.8M, False: 0]
1982
11.8M
        !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
  Branch (1982:7): [True: 11.0k, False: 11.7M]
  Branch (1982:34): [True: 11.0k, False: 0]
1983
11.0k
      event_debug(("%s: no events registered.", __func__));
1984
11.0k
      retval = 1;
1985
11.0k
      goto done;
1986
11.0k
    }
1987
1988
11.7M
    event_queue_make_later_events_active(base);
1989
1990
11.7M
    clear_time_cache(base);
1991
1992
11.7M
    res = evsel->dispatch(base, tv_p);
1993
1994
11.7M
    if (res == -1) {
  Branch (1994:7): [True: 0, False: 11.7M]
1995
0
      event_debug(("%s: dispatch returned unsuccessfully.",
1996
0
        __func__));
1997
0
      retval = -1;
1998
0
      goto done;
1999
0
    }
2000
2001
11.7M
    update_time_cache(base);
2002
2003
11.7M
    timeout_process(base);
2004
2005
11.7M
    if (N_ACTIVE_CALLBACKS(base)) {
2006
11.7M
      int n = event_process_active(base);
2007
11.7M
      if ((flags & EVLOOP_ONCE)
  Branch (2007:8): [True: 0, False: 11.7M]
2008
11.7M
          && N_ACTIVE_CALLBACKS(base) == 0
  Branch (2008:11): [True: 0, False: 0]
2009
11.7M
          && n != 0)
  Branch (2009:11): [True: 0, False: 0]
2010
0
        done = 1;
2011
11.7M
    } else if (flags & EVLOOP_NONBLOCK)
  Branch (2011:14): [True: 0, False: 0]
2012
0
      done = 1;
2013
11.7M
  }
2014
0
  event_debug(("%s: asked to terminate loop.", __func__));
2015
2016
11.0k
done:
2017
11.0k
  clear_time_cache(base);
2018
11.0k
  base->running_loop = 0;
2019
2020
11.0k
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2021
2022
11.0k
  return (retval);
2023
0
}
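The flags tested in the loop above map onto a few common calling patterns; a minimal sketch (illustrative only, with one call per pattern shown back to back purely for comparison):

#include <event2/event.h>

static void drive_loop(struct event_base *base)
{
  /* Run until no events remain (what event_base_dispatch() does). */
  event_base_loop(base, 0);

  /* Poll once without blocking, e.g. when embedded in a foreign main loop. */
  event_base_loop(base, EVLOOP_NONBLOCK);

  /* Block for a single round of callbacks, then return. */
  event_base_loop(base, EVLOOP_ONCE);

  /* Keep running even while no events are registered. */
  event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
}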
2024
2025
/* One-time callback to implement event_base_once: invokes the user callback,
2026
 * then deletes the allocated storage */
2027
static void
2028
event_once_cb(evutil_socket_t fd, short events, void *arg)
2029
2.15k
{
2030
2.15k
  struct event_once *eonce = arg;
2031
2032
2.15k
  (*eonce->cb)(fd, events, eonce->arg);
2033
2.15k
  EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2034
2.15k
  LIST_REMOVE(eonce, next_once);
2035
2.15k
  EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2036
2.15k
  event_debug_unassign(&eonce->ev);
2037
2.15k
  mm_free(eonce);
2038
2.15k
}
2039
2040
/* not threadsafe, event scheduled once. */
2041
int
2042
event_once(evutil_socket_t fd, short events,
2043
    void (*callback)(evutil_socket_t, short, void *),
2044
    void *arg, const struct timeval *tv)
2045
0
{
2046
0
  return event_base_once(current_base, fd, events, callback, arg, tv);
2047
0
}
2048
2049
/* Schedules an event once */
2050
int
2051
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2052
    void (*callback)(evutil_socket_t, short, void *),
2053
    void *arg, const struct timeval *tv)
2054
11.0k
{
2055
11.0k
  struct event_once *eonce;
2056
11.0k
  int res = 0;
2057
11.0k
  int activate = 0;
2058
2059
11.0k
  if (!base)
  Branch (2059:6): [True: 0, False: 11.0k]
2060
0
    return (-1);
2061
2062
  /* We cannot support signals that just fire once, or persistent
2063
   * events. */
2064
11.0k
  if (events & (EV_SIGNAL|EV_PERSIST))
  Branch (2064:6): [True: 0, False: 11.0k]
2065
0
    return (-1);
2066
2067
11.0k
  if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
  Branch (2067:6): [True: 0, False: 11.0k]
2068
0
    return (-1);
2069
2070
11.0k
  eonce->cb = callback;
2071
11.0k
  eonce->arg = arg;
2072
2073
11.0k
  if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
  Branch (2073:6): [True: 11.0k, False: 0]
2074
11.0k
    evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2075
2076
11.0k
    if (tv == NULL || ! evutil_timerisset(tv)) {
  Branch (2076:7): [True: 11.0k, False: 0]
  Branch (2076:21): [True: 18.4E, False: 11.0k]
2077
      /* If the event is going to become active immediately,
2078
       * don't put it on the timeout queue.  This is one
2079
       * idiom for scheduling a callback, so let's make
2080
       * it fast (and order-preserving). */
2081
11.0k
      activate = 1;
2082
11.0k
    }
2083
11.0k
  } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
  Branch (2083:13): [True: 0, False: 0]
2084
0
    events &= EV_READ|EV_WRITE|EV_CLOSED;
2085
2086
0
    event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2087
0
  } else {
2088
    /* Bad event combination */
2089
0
    mm_free(eonce);
2090
0
    return (-1);
2091
0
  }
2092
2093
11.0k
  if (res == 0) {
  Branch (2093:6): [True: 11.0k, False: 0]
2094
11.0k
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2095
11.0k
    if (activate)
  Branch (2095:7): [True: 11.0k, False: 0]
2096
11.0k
      event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2097
0
    else
2098
0
      res = event_add_nolock_(&eonce->ev, tv, 0);
2099
2100
11.0k
    if (res != 0) {
  Branch (2100:7): [True: 0, False: 11.0k]
2101
0
      mm_free(eonce);
2102
0
      return (res);
2103
11.0k
    } else {
2104
11.0k
      LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2105
11.0k
    }
2106
11.0k
    EVBASE_RELEASE_LOCK(base, th_base_lock);
2107
11.0k
  }
2108
2109
11.0k
  return (0);
2110
11.0k
}
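A sketch of typical event_base_once() usage (illustrative): with EV_TIMEOUT and a NULL or zero timeval it takes the activate-immediately fast path above; otherwise the one-shot event goes onto the timeout queue and its storage is released after the callback runs.

#include <event2/event.h>

static void run_later(evutil_socket_t fd, short what, void *arg)
{
  (void)fd; (void)what; (void)arg;   /* one-shot work goes here */
}

static void schedule_once(struct event_base *base)
{
  struct timeval half_sec = { 0, 500000 };

  event_base_once(base, -1, EV_TIMEOUT, run_later, NULL, &half_sec);  /* after ~0.5 s */
  event_base_once(base, -1, EV_TIMEOUT, run_later, NULL, NULL);       /* next loop pass */
}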
2111
2112
int
2113
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2114
11.8M
{
2115
11.8M
  if (!base)
  Branch (2115:6): [True: 0, False: 11.8M]
2116
0
    base = current_base;
2117
11.8M
  if (arg == &event_self_cbarg_ptr_)
  Branch (2117:6): [True: 0, False: 11.8M]
2118
0
    arg = ev;
2119
2120
11.8M
  if (!(events & EV_SIGNAL))
  Branch (2120:6): [True: 11.8M, False: 0]
2121
11.8M
    event_debug_assert_socket_nonblocking_(fd);
2122
11.8M
  event_debug_assert_not_added_(ev);
2123
2124
11.8M
  ev->ev_base = base;
2125
2126
11.8M
  ev->ev_callback = callback;
2127
11.8M
  ev->ev_arg = arg;
2128
11.8M
  ev->ev_fd = fd;
2129
11.8M
  ev->ev_events = events;
2130
11.8M
  ev->ev_res = 0;
2131
11.8M
  ev->ev_flags = EVLIST_INIT;
2132
11.8M
  ev->ev_ncalls = 0;
2133
11.8M
  ev->ev_pncalls = NULL;
2134
2135
11.8M
  if (events & EV_SIGNAL) {
  Branch (2135:6): [True: 0, False: 11.8M]
2136
0
    if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
  Branch (2136:7): [True: 0, False: 0]
2137
0
      event_warnx("%s: EV_SIGNAL is not compatible with "
2138
0
          "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2139
0
      return -1;
2140
0
    }
2141
0
    ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2142
11.8M
  } else {
2143
11.8M
    if (events & EV_PERSIST) {
  Branch (2143:7): [True: 9.46M, False: 2.36M]
2144
9.46M
      evutil_timerclear(&ev->ev_io_timeout);
2145
9.46M
      ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2146
9.46M
    } else {
2147
2.36M
      ev->ev_closure = EV_CLOSURE_EVENT;
2148
2.36M
    }
2149
11.8M
  }
2150
2151
11.8M
  min_heap_elem_init_(ev);
2152
2153
11.8M
  if (base != NULL) {
  Branch (2153:6): [True: 11.8M, False: 0]
2154
    /* by default, we put new events into the middle priority */
2155
11.8M
    ev->ev_pri = base->nactivequeues / 2;
2156
11.8M
  }
2157
2158
11.8M
  event_debug_note_setup_(ev);
2159
2160
11.8M
  return 0;
2161
11.8M
}
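A sketch of the caller-allocated alternative to event_new() (illustrative struct and helper names): event_assign() initializes storage the caller owns, such as an event embedded in a larger object, which must stay valid until the event is deleted.

#include <event2/event.h>
#include <event2/event_struct.h>   /* exposes struct event so it can be embedded */

struct connection {
  struct event read_ev;
  evutil_socket_t fd;
};

static void on_readable(evutil_socket_t fd, short what, void *arg)
{
  struct connection *conn = arg;
  (void)fd; (void)what; (void)conn;  /* read from conn->fd here */
}

static void conn_start(struct event_base *base, struct connection *conn)
{
  event_assign(&conn->read_ev, base, conn->fd,
      EV_READ | EV_PERSIST, on_readable, conn);
  event_add(&conn->read_ev, NULL);
}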
2162
2163
int
2164
event_base_set(struct event_base *base, struct event *ev)
2165
0
{
2166
  /* Only innocent events may be assigned to a different base */
2167
0
  if (ev->ev_flags != EVLIST_INIT)
  Branch (2167:6): [True: 0, False: 0]
2168
0
    return (-1);
2169
2170
0
  event_debug_assert_is_setup_(ev);
2171
2172
0
  ev->ev_base = base;
2173
0
  ev->ev_pri = base->nactivequeues/2;
2174
2175
0
  return (0);
2176
0
}
2177
2178
void
2179
event_set(struct event *ev, evutil_socket_t fd, short events,
2180
    void (*callback)(evutil_socket_t, short, void *), void *arg)
2181
0
{
2182
0
  int r;
2183
0
  r = event_assign(ev, current_base, fd, events, callback, arg);
2184
0
  EVUTIL_ASSERT(r == 0);
2185
0
}
2186
2187
void *
2188
event_self_cbarg(void)
2189
0
{
2190
0
  return &event_self_cbarg_ptr_;
2191
0
}
2192
2193
struct event *
2194
event_base_get_running_event(struct event_base *base)
2195
0
{
2196
0
  struct event *ev = NULL;
2197
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2198
0
  if (EVBASE_IN_THREAD(base)) {
2199
0
    struct event_callback *evcb = base->current_event;
2200
0
    if (evcb->evcb_flags & EVLIST_INIT)
  Branch (2200:7): [True: 0, False: 0]
2201
0
      ev = event_callback_to_event(evcb);
2202
0
  }
2203
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2204
0
  return ev;
2205
0
}
2206
2207
struct event *
2208
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2209
2.35M
{
2210
2.35M
  struct event *ev;
2211
2.35M
  ev = mm_malloc(sizeof(struct event));
2212
2.35M
  if (ev == NULL)
  Branch (2212:6): [True: 0, False: 2.35M]
2213
0
    return (NULL);
2214
2.35M
  if (event_assign(ev, base, fd, events, cb, arg) < 0) {
  Branch (2214:6): [True: 0, False: 2.35M]
2215
0
    mm_free(ev);
2216
0
    return (NULL);
2217
0
  }
2218
2219
2.35M
  return (ev);
2220
2.35M
}
2221
2222
void
2223
event_free(struct event *ev)
2224
2.35M
{
2225
  /* This is disabled, so that events which have been finalized can still be a
2226
   * valid target for event_free(). */
2227
  // event_debug_assert_is_setup_(ev);
2228
2229
  /* make sure that this event won't be coming back to haunt us. */
2230
2.35M
  event_del(ev);
2231
2.35M
  event_debug_note_teardown_(ev);
2232
2.35M
  mm_free(ev);
2233
2234
2.35M
}
2235
2236
void
2237
event_debug_unassign(struct event *ev)
2238
35.4k
{
2239
35.4k
  event_debug_assert_not_added_(ev);
2240
35.4k
  event_debug_note_teardown_(ev);
2241
2242
35.4k
  ev->ev_flags &= ~EVLIST_INIT;
2243
35.4k
}
2244
2245
0
#define EVENT_FINALIZE_FREE_ 0x10000
2246
static int
2247
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2248
0
{
2249
0
  ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
  Branch (2249:23): [True: 0, False: 0]
2250
0
      EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2251
2252
0
  event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2253
0
  ev->ev_closure = closure;
2254
0
  ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2255
0
  event_active_nolock_(ev, EV_FINALIZE, 1);
2256
0
  ev->ev_flags |= EVLIST_FINALIZING;
2257
0
  return 0;
2258
0
}
2259
2260
static int
2261
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2262
0
{
2263
0
  int r;
2264
0
  struct event_base *base = ev->ev_base;
2265
0
  if (EVUTIL_FAILURE_CHECK(!base)) {
2266
0
    event_warnx("%s: event has no event_base set.", __func__);
2267
0
    return -1;
2268
0
  }
2269
2270
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2271
0
  r = event_finalize_nolock_(base, flags, ev, cb);
2272
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2273
0
  return r;
2274
0
}
2275
2276
int
2277
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2278
0
{
2279
0
  return event_finalize_impl_(flags, ev, cb);
2280
0
}
2281
2282
int
2283
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2284
0
{
2285
0
  return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2286
0
}
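A hedged sketch of the finalize API above (illustrative helper names; assumes the per-event state was passed as the event's callback argument): event_free_finalize() defers teardown to the loop, which is useful when another thread might still be running the event's callback.

#include <stdlib.h>
#include <event2/event.h>

static void free_state(struct event *ev, void *arg)
{
  (void)ev;
  free(arg);   /* runs from the loop, once no callback can still be executing */
}

static void shut_down_event(struct event *ev)
{
  /* Queues the FINALIZE_FREE closure: the loop invokes free_state(ev, ev_arg)
   * and then frees the struct event itself. */
  event_free_finalize(0, ev, free_state);
}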
2287
2288
void
2289
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2290
2.35M
{
2291
2.35M
  struct event *ev = NULL;
2292
2.35M
  if (evcb->evcb_flags & EVLIST_INIT) {
  Branch (2292:6): [True: 2.35M, False: 0]
2293
2.35M
    ev = event_callback_to_event(evcb);
2294
2.35M
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2295
2.35M
  } else {
2296
0
    event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2297
0
  }
2298
2299
2.35M
  evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2300
2.35M
  evcb->evcb_cb_union.evcb_cbfinalize = cb;
2301
2.35M
  event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2302
2.35M
  evcb->evcb_flags |= EVLIST_FINALIZING;
2303
2.35M
}
2304
2305
void
2306
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2307
0
{
2308
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2309
0
  event_callback_finalize_nolock_(base, flags, evcb, cb);
2310
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2311
0
}
2312
2313
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2314
 * callback will be invoked on *one of them*, after they have *all* been
2315
 * finalized. */
2316
int
2317
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2318
2.35M
{
2319
2.35M
  int n_pending = 0, i;
2320
2321
2.35M
  if (base == NULL)
  Branch (2321:6): [True: 0, False: 2.35M]
2322
0
    base = current_base;
2323
2324
2.35M
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2325
2326
2.35M
  event_debug(("%s: %d events finalizing", __func__, n_cbs));
2327
2328
  /* At most one can be currently executing; the rest we just
2329
   * cancel... But we always make sure that the finalize callback
2330
   * runs. */
2331
9.43M
  for (i = 0; i < n_cbs; ++i) {
  Branch (2331:14): [True: 7.07M, False: 2.35M]
2332
7.07M
    struct event_callback *evcb = evcbs[i];
2333
7.07M
    if (evcb == base->current_event) {
  Branch (2333:7): [True: 2.35M, False: 4.71M]
2334
2.35M
      event_callback_finalize_nolock_(base, 0, evcb, cb);
2335
2.35M
      ++n_pending;
2336
4.71M
    } else {
2337
4.71M
      event_callback_cancel_nolock_(base, evcb, 0);
2338
4.71M
    }
2339
7.07M
  }
2340
2341
2.35M
  if (n_pending == 0) {
  Branch (2341:6): [True: 0, False: 2.35M]
2342
    /* Just do the first one. */
2343
0
    event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2344
0
  }
2345
2346
2.35M
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2347
2.35M
  return 0;
2348
2.35M
}
2349
2350
/*
2351
 * Sets the priority of an event - if an event is already scheduled,
2352
 * changing the priority is going to fail.
2353
 */
2354
2355
int
2356
event_priority_set(struct event *ev, int pri)
2357
22.1k
{
2358
22.1k
  event_debug_assert_is_setup_(ev);
2359
2360
22.1k
  if (ev->ev_flags & EVLIST_ACTIVE)
  Branch (2360:6): [True: 0, False: 22.1k]
2361
0
    return (-1);
2362
22.1k
  if (pri < 0 || pri >= ev->ev_base->nactivequeues)
  Branch (2362:6): [True: 0, False: 22.1k]
  Branch (2362:17): [True: 11.0k, False: 11.0k]
2363
11.0k
    return (-1);
2364
2365
11.0k
  ev->ev_pri = pri;
2366
2367
11.0k
  return (0);
2368
22.1k
}
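A sketch of how priorities are set up from the caller's side (illustrative): the base must be given more than one queue with event_base_priority_init() before events become active, and event_priority_set() fails for an active event, as checked above.

#include <event2/event.h>

static void make_urgent(struct event_base *base, struct event *ev)
{
  event_base_priority_init(base, 3);   /* queues 0..2; call before events become active */
  event_priority_set(ev, 0);           /* lower number = dispatched first */
  event_add(ev, NULL);
}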
2369
2370
/*
2371
 * Checks if a specific event is pending or scheduled.
2372
 */
2373
2374
int
2375
event_pending(const struct event *ev, short event, struct timeval *tv)
2376
4.71M
{
2377
4.71M
  int flags = 0;
2378
2379
4.71M
  if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2380
0
    event_warnx("%s: event has no event_base set.", __func__);
2381
0
    return 0;
2382
0
  }
2383
2384
4.71M
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2385
4.71M
  event_debug_assert_is_setup_(ev);
2386
2387
4.71M
  if (ev->ev_flags & EVLIST_INSERTED)
  Branch (2387:6): [True: 2.35M, False: 2.35M]
2388
2.35M
    flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2389
4.71M
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
  Branch (2389:6): [True: 0, False: 4.71M]
2390
0
    flags |= ev->ev_res;
2391
4.71M
  if (ev->ev_flags & EVLIST_TIMEOUT)
  Branch (2391:6): [True: 0, False: 4.71M]
2392
0
    flags |= EV_TIMEOUT;
2393
2394
4.71M
  event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2395
2396
  /* See if there is a timeout that we should report */
2397
4.71M
  if (tv != NULL && (flags & event & EV_TIMEOUT)) {
  Branch (2397:6): [True: 0, False: 4.71M]
  Branch (2397:20): [True: 0, False: 0]
2398
0
    struct timeval tmp = ev->ev_timeout;
2399
0
    tmp.tv_usec &= MICROSECONDS_MASK;
2400
    /* correctly remap to real time */
2401
0
    evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
  Branch (2401:3): [True: 0, False: 0]
  Branch (2401:3): [Folded - Ignored]
2402
0
  }
2403
2404
4.71M
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2405
2406
4.71M
  return (flags & event);
2407
4.71M
}
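A sketch of checking pending state from application code (illustrative helper): event_pending() returns the subset of the requested flags the event is scheduled for, and can copy out the expiry time, remapped to the real clock as above.

#include <event2/event.h>

static int has_timeout_pending(const struct event *ev)
{
  struct timeval when;

  if (event_pending(ev, EV_TIMEOUT, &when)) {
    /* 'when' now holds the absolute time at which the timeout will fire. */
    return 1;
  }
  return 0;
}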
2408
2409
int
2410
event_initialized(const struct event *ev)
2411
4.71M
{
2412
4.71M
  if (!(ev->ev_flags & EVLIST_INIT))
  Branch (2412:6): [True: 2.35M, False: 2.35M]
2413
2.35M
    return 0;
2414
2415
2.35M
  return 1;
2416
4.71M
}
2417
2418
void
2419
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2420
0
{
2421
0
  event_debug_assert_is_setup_(event);
2422
2423
0
  if (base_out)
  Branch (2423:6): [True: 0, False: 0]
2424
0
    *base_out = event->ev_base;
2425
0
  if (fd_out)
  Branch (2425:6): [True: 0, False: 0]
2426
0
    *fd_out = event->ev_fd;
2427
0
  if (events_out)
  Branch (2427:6): [True: 0, False: 0]
2428
0
    *events_out = event->ev_events;
2429
0
  if (callback_out)
  Branch (2429:6): [True: 0, False: 0]
2430
0
    *callback_out = event->ev_callback;
2431
0
  if (arg_out)
  Branch (2431:6): [True: 0, False: 0]
2432
0
    *arg_out = event->ev_arg;
2433
0
}
2434
2435
size_t
2436
event_get_struct_event_size(void)
2437
0
{
2438
0
  return sizeof(struct event);
2439
0
}
2440
2441
evutil_socket_t
2442
event_get_fd(const struct event *ev)
2443
2.38M
{
2444
2.38M
  event_debug_assert_is_setup_(ev);
2445
2.38M
  return ev->ev_fd;
2446
2.38M
}
2447
2448
struct event_base *
2449
event_get_base(const struct event *ev)
2450
0
{
2451
0
  event_debug_assert_is_setup_(ev);
2452
0
  return ev->ev_base;
2453
0
}
2454
2455
short
2456
event_get_events(const struct event *ev)
2457
0
{
2458
0
  event_debug_assert_is_setup_(ev);
2459
0
  return ev->ev_events;
2460
0
}
2461
2462
event_callback_fn
2463
event_get_callback(const struct event *ev)
2464
0
{
2465
0
  event_debug_assert_is_setup_(ev);
2466
0
  return ev->ev_callback;
2467
0
}
2468
2469
void *
2470
event_get_callback_arg(const struct event *ev)
2471
0
{
2472
0
  event_debug_assert_is_setup_(ev);
2473
0
  return ev->ev_arg;
2474
0
}
2475
2476
int
2477
event_get_priority(const struct event *ev)
2478
2.35M
{
2479
2.35M
  event_debug_assert_is_setup_(ev);
2480
2.35M
  return ev->ev_pri;
2481
2.35M
}
2482
2483
int
2484
event_add(struct event *ev, const struct timeval *tv)
2485
16.5M
{
2486
16.5M
  int res;
2487
2488
16.5M
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2489
0
    event_warnx("%s: event has no event_base set.", __func__);
2490
0
    return -1;
2491
0
  }
2492
2493
16.5M
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2494
2495
16.5M
  res = event_add_nolock_(ev, tv, 0);
2496
2497
16.5M
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2498
2499
16.5M
  return (res);
2500
16.5M
}
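One practical consequence of the nolock logic below is worth a sketch (illustrative helper): re-adding an event whose timeout is already pending simply replaces the old deadline, which is the usual way to implement an idle timer that is pushed back on every bit of activity.

#include <event2/event.h>

static void reset_idle_timer(struct event *idle_ev)
{
  struct timeval idle = { 30, 0 };

  event_add(idle_ev, &idle);   /* restarts the 30-second countdown on each call */
}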
2501
2502
/* Helper callback: wake an event_base from another thread.  This version
2503
 * works by writing a byte to one end of a socketpair, so that the event_base
2504
 * listening on the other end will wake up as the corresponding event
2505
 * triggers */
2506
static int
2507
evthread_notify_base_default(struct event_base *base)
2508
0
{
2509
0
  char buf[1];
2510
0
  int r;
2511
0
  buf[0] = (char) 0;
2512
#ifdef _WIN32
2513
  r = send(base->th_notify_fd[1], buf, 1, 0);
2514
#else
2515
0
  r = write(base->th_notify_fd[1], buf, 1);
2516
0
#endif
2517
0
  return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
  Branch (2517:10): [True: 0, False: 0]
  Branch (2517:19): [True: 0, False: 0]
2518
0
}
2519
2520
#ifdef EVENT__HAVE_EVENTFD
2521
/* Helper callback: wake an event_base from another thread.  This version
2522
 * assumes that you have a working eventfd() implementation. */
2523
static int
2524
evthread_notify_base_eventfd(struct event_base *base)
2525
2.37M
{
2526
2.37M
  ev_uint64_t msg = 1;
2527
2.37M
  int r;
2528
2.37M
  do {
2529
2.37M
    r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2530
2.37M
  } while (r < 0 && errno == EAGAIN);
  Branch (2530:11): [True: 0, False: 2.37M]
  Branch (2530:20): [True: 0, False: 0]
2531
2532
2.37M
  return (r < 0) ? -1 : 0;
  Branch (2532:9): [True: 0, False: 2.37M]
2533
2.37M
}
2534
#endif
2535
2536
2537
/** Tell the thread currently running the event_loop for base (if any) that it
2538
 * needs to stop waiting in its dispatch function (if it is) and process all
2539
 * active callbacks. */
2540
static int
2541
evthread_notify_base(struct event_base *base)
2542
2.37M
{
2543
2.37M
  EVENT_BASE_ASSERT_LOCKED(base);
2544
2.37M
  if (!base->th_notify_fn)
  Branch (2544:6): [True: 0, False: 2.37M]
2545
0
    return -1;
2546
2.37M
  if (base->is_notify_pending)
  Branch (2546:6): [True: 2.07k, False: 2.37M]
2547
2.07k
    return 0;
2548
2.37M
  base->is_notify_pending = 1;
2549
2.37M
  return base->th_notify_fn(base);
2550
2.37M
}
2551
2552
/* Implementation function to remove a timeout on a currently pending event.
2553
 */
2554
int
2555
event_remove_timer_nolock_(struct event *ev)
2556
0
{
2557
0
  struct event_base *base = ev->ev_base;
2558
2559
0
  EVENT_BASE_ASSERT_LOCKED(base);
2560
0
  event_debug_assert_is_setup_(ev);
2561
2562
0
  event_debug(("event_remove_timer_nolock: event: %p", ev));
2563
2564
  /* If it's not pending on a timeout, we don't need to do anything. */
2565
0
  if (ev->ev_flags & EVLIST_TIMEOUT) {
  Branch (2565:6): [True: 0, False: 0]
2566
0
    event_queue_remove_timeout(base, ev);
2567
0
    evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2568
0
  }
2569
2570
0
  return (0);
2571
0
}
2572
2573
int
2574
event_remove_timer(struct event *ev)
2575
0
{
2576
0
  int res;
2577
2578
0
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2579
0
    event_warnx("%s: event has no event_base set.", __func__);
2580
0
    return -1;
2581
0
  }
2582
2583
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2584
2585
0
  res = event_remove_timer_nolock_(ev);
2586
2587
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2588
2589
0
  return (res);
2590
0
}
2591
2592
/* Implementation function to add an event.  Works just like event_add,
2593
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2594
 * we treat tv as an absolute time, not as an interval to add to the current
2595
 * time */
2596
int
2597
event_add_nolock_(struct event *ev, const struct timeval *tv,
2598
    int tv_is_absolute)
2599
23.5M
{
2600
23.5M
  struct event_base *base = ev->ev_base;
2601
23.5M
  int res = 0;
2602
23.5M
  int notify = 0;
2603
2604
23.5M
  EVENT_BASE_ASSERT_LOCKED(base);
2605
23.5M
  event_debug_assert_is_setup_(ev);
2606
2607
23.5M
  event_debug((
2608
23.5M
     "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2609
23.5M
     ev,
2610
23.5M
     EV_SOCK_ARG(ev->ev_fd),
2611
23.5M
     ev->ev_events & EV_READ ? "EV_READ " : " ",
2612
23.5M
     ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2613
23.5M
     ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2614
23.5M
     tv ? "EV_TIMEOUT " : " ",
2615
23.5M
     ev->ev_callback));
2616
2617
23.5M
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2618
2619
23.5M
  if (ev->ev_flags & EVLIST_FINALIZING) {
  Branch (2619:6): [True: 0, False: 23.5M]
2620
    /* XXXX debug */
2621
0
    return (-1);
2622
0
  }
2623
2624
  /*
2625
   * prepare for timeout insertion further below; if we get a
2626
   * failure on any step, we should not change any state.
2627
   */
2628
23.5M
  if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
  Branch (2628:6): [True: 18.8M, False: 4.73M]
  Branch (2628:20): [True: 7.07M, False: 11.7M]
2629
7.07M
    if (min_heap_reserve_(&base->timeheap,
  Branch (2629:7): [True: 0, False: 7.07M]
2630
7.07M
      1 + min_heap_size_(&base->timeheap)) == -1)
2631
0
      return (-1);  /* ENOMEM == errno */
2632
7.07M
  }
2633
2634
  /* If the main thread is currently executing a signal event's
2635
   * callback, and we are not the main thread, then we want to wait
2636
   * until the callback is done before we mess with the event, or else
2637
   * we can race on ev_ncalls and ev_pncalls below. */
2638
23.5M
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2639
23.5M
  if (base->current_event == event_to_event_callback(ev) &&
  Branch (2639:6): [True: 7.06M, False: 16.5M]
2640
23.5M
      (ev->ev_events & EV_SIGNAL)
  Branch (2640:6): [True: 0, False: 7.06M]
2641
23.5M
      && !EVBASE_IN_THREAD(base)) {
2642
0
    ++base->current_event_waiters;
2643
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2644
0
  }
2645
23.5M
#endif
2646
2647
23.5M
  if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
  Branch (2647:6): [True: 23.5M, False: 0]
2648
23.5M
      !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
  Branch (2648:6): [True: 9.45M, False: 14.1M]
2649
9.45M
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
  Branch (2649:7): [True: 9.45M, False: 0]
2650
9.45M
      res = evmap_io_add_(base, ev->ev_fd, ev);
2651
0
    else if (ev->ev_events & EV_SIGNAL)
  Branch (2651:12): [True: 0, False: 0]
2652
0
      res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2653
9.45M
    if (res != -1)
  Branch (2653:7): [True: 9.45M, False: 0]
2654
9.45M
      event_queue_insert_inserted(base, ev);
2655
9.45M
    if (res == 1) {
  Branch (2655:7): [True: 9.45M, False: 0]
2656
      /* evmap says we need to notify the main thread. */
2657
9.45M
      notify = 1;
2658
9.45M
      res = 0;
2659
9.45M
    }
2660
9.45M
  }
2661
2662
  /*
2663
   * we should change the timeout state only if the previous event
2664
   * addition succeeded.
2665
   */
2666
23.5M
  if (res != -1 && tv != NULL) {
  Branch (2666:6): [True: 23.5M, False: 0]
  Branch (2666:19): [True: 18.8M, False: 4.73M]
2667
18.8M
    struct timeval now;
2668
18.8M
    int common_timeout;
2669
#ifdef USE_REINSERT_TIMEOUT
2670
    int was_common;
2671
    int old_timeout_idx;
2672
#endif
2673
2674
    /*
2675
     * for persistent timeout events, we remember the
2676
     * timeout value and re-add the event.
2677
     *
2678
     * If tv_is_absolute, this was already set.
2679
     */
2680
18.8M
    if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
  Branch (2680:7): [True: 18.8M, False: 0]
  Branch (2680:53): [True: 11.7M, False: 7.06M]
2681
11.7M
      ev->ev_io_timeout = *tv;
2682
2683
18.8M
#ifndef USE_REINSERT_TIMEOUT
2684
18.8M
    if (ev->ev_flags & EVLIST_TIMEOUT) {
  Branch (2684:7): [True: 11.7M, False: 7.07M]
2685
11.7M
      event_queue_remove_timeout(base, ev);
2686
11.7M
    }
2687
18.8M
#endif
2688
2689
    /* Check if it is active due to a timeout.  Rescheduling
2690
     * this timeout before the callback can be executed
2691
     * removes it from the active list. */
2692
18.8M
    if ((ev->ev_flags & EVLIST_ACTIVE) &&
  Branch (2692:7): [True: 0, False: 18.8M]
2693
18.8M
        (ev->ev_res & EV_TIMEOUT)) {
  Branch (2693:7): [True: 0, False: 0]
2694
0
      if (ev->ev_events & EV_SIGNAL) {
  Branch (2694:8): [True: 0, False: 0]
2695
        /* See if we are just active executing
2696
         * this event in a loop
2697
         */
2698
0
        if (ev->ev_ncalls && ev->ev_pncalls) {
  Branch (2698:9): [True: 0, False: 0]
  Branch (2698:26): [True: 0, False: 0]
2699
          /* Abort loop */
2700
0
          *ev->ev_pncalls = 0;
2701
0
        }
2702
0
      }
2703
2704
0
      event_queue_remove_active(base, event_to_event_callback(ev));
2705
0
    }
2706
2707
18.8M
    gettime(base, &now);
2708
2709
18.8M
    common_timeout = is_common_timeout(tv, base);
2710
#ifdef USE_REINSERT_TIMEOUT
2711
    was_common = is_common_timeout(&ev->ev_timeout, base);
2712
    old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2713
#endif
2714
2715
18.8M
    if (tv_is_absolute) {
  Branch (2715:7): [True: 7.06M, False: 11.7M]
2716
7.06M
      ev->ev_timeout = *tv;
2717
11.7M
    } else if (common_timeout) {
  Branch (2717:14): [True: 0, False: 11.7M]
2718
0
      struct timeval tmp = *tv;
2719
0
      tmp.tv_usec &= MICROSECONDS_MASK;
2720
0
      evutil_timeradd(&now, &tmp, &ev->ev_timeout);
  Branch (2720:4): [True: 0, False: 0]
  Branch (2720:4): [Folded - Ignored]
2721
0
      ev->ev_timeout.tv_usec |=
2722
0
          (tv->tv_usec & ~MICROSECONDS_MASK);
2723
11.7M
    } else {
2724
11.7M
      evutil_timeradd(&now, tv, &ev->ev_timeout);
  Branch (2724:4): [True: 0, False: 11.7M]
  Branch (2724:4): [Folded - Ignored]
2725
11.7M
    }
2726
2727
18.8M
    event_debug((
2728
18.8M
       "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2729
18.8M
       ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2730
2731
#ifdef USE_REINSERT_TIMEOUT
2732
    event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2733
#else
2734
18.8M
    event_queue_insert_timeout(base, ev);
2735
18.8M
#endif
2736
2737
18.8M
    if (common_timeout) {
  Branch (2737:7): [True: 0, False: 18.8M]
2738
0
      struct common_timeout_list *ctl =
2739
0
          get_common_timeout_list(base, &ev->ev_timeout);
2740
0
      if (ev == TAILQ_FIRST(&ctl->events)) {
  Branch (2740:8): [True: 0, False: 0]
2741
0
        common_timeout_schedule(ctl, &now, ev);
2742
0
      }
2743
18.8M
    } else {
2744
18.8M
      struct event* top = NULL;
2745
      /* See if the earliest timeout is now earlier than it
2746
       * was before: if so, we will need to tell the main
2747
       * thread to wake up earlier than it would otherwise.
2748
       * We double check the timeout of the top element to
2749
       * handle time distortions due to system suspension.
2750
       */
2751
18.8M
      if (min_heap_elt_is_top_(ev))
  Branch (2751:8): [True: 14.1M, False: 4.71M]
2752
14.1M
        notify = 1;
2753
4.71M
      else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
  Branch (2753:13): [True: 4.71M, False: 0]
2754
4.71M
           evutil_timercmp(&top->ev_timeout, &now, <))
2755
0
        notify = 1;
2756
18.8M
    }
2757
18.8M
  }
2758
2759
  /* if we are not in the right thread, we need to wake up the loop */
2760
23.5M
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
  Branch (2760:6): [True: 23.5M, False: 0]
  Branch (2760:19): [True: 21.2M, False: 2.35M]
2761
0
    evthread_notify_base(base);
2762
2763
23.5M
  event_debug_note_add_(ev);
2764
2765
23.5M
  return (res);
2766
23.5M
}
2767
2768
static int
2769
event_del_(struct event *ev, int blocking)
2770
28.2M
{
2771
28.2M
  int res;
2772
28.2M
  struct event_base *base = ev->ev_base;
2773
2774
28.2M
  if (EVUTIL_FAILURE_CHECK(!base)) {
2775
0
    event_warnx("%s: event has no event_base set.", __func__);
2776
0
    return -1;
2777
0
  }
2778
2779
28.2M
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2780
28.2M
  res = event_del_nolock_(ev, blocking);
2781
28.2M
  EVBASE_RELEASE_LOCK(base, th_base_lock);
2782
2783
28.2M
  return (res);
2784
28.2M
}
2785
2786
int
2787
event_del(struct event *ev)
2788
28.2M
{
2789
28.2M
  return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2790
28.2M
}
2791
2792
int
2793
event_del_block(struct event *ev)
2794
0
{
2795
0
  return event_del_(ev, EVENT_DEL_BLOCK);
2796
0
}
2797
2798
int
2799
event_del_noblock(struct event *ev)
2800
0
{
2801
0
  return event_del_(ev, EVENT_DEL_NOBLOCK);
2802
0
}
2803
2804
/** Helper for event_del: always called with th_base_lock held.
2805
 *
2806
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2807
 * EVEN_IF_FINALIZING} values. See those for more information.
2808
 */
2809
int
2810
event_del_nolock_(struct event *ev, int blocking)
2811
35.3M
{
2812
35.3M
  struct event_base *base;
2813
35.3M
  int res = 0, notify = 0;
2814
2815
35.3M
  event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2816
35.3M
    ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2817
2818
  /* An event without a base has not been added */
2819
35.3M
  if (ev->ev_base == NULL)
  Branch (2819:6): [True: 0, False: 35.3M]
2820
0
    return (-1);
2821
2822
35.3M
  EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2823
2824
35.3M
  if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
  Branch (2824:6): [True: 35.3M, False: 8.93k]
2825
35.3M
    if (ev->ev_flags & EVLIST_FINALIZING) {
  Branch (2825:7): [True: 0, False: 35.3M]
2826
      /* XXXX Debug */
2827
0
      return 0;
2828
0
    }
2829
35.3M
  }
2830
2831
35.3M
  base = ev->ev_base;
2832
2833
35.3M
  EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2834
2835
  /* See if we are just active executing this event in a loop */
2836
35.3M
  if (ev->ev_events & EV_SIGNAL) {
  Branch (2836:6): [True: 0, False: 35.3M]
2837
0
    if (ev->ev_ncalls && ev->ev_pncalls) {
  Branch (2837:7): [True: 0, False: 0]
  Branch (2837:24): [True: 0, False: 0]
2838
      /* Abort loop */
2839
0
      *ev->ev_pncalls = 0;
2840
0
    }
2841
0
  }
2842
2843
35.3M
  if (ev->ev_flags & EVLIST_TIMEOUT) {
  Branch (2843:6): [True: 7.07M, False: 28.2M]
2844
    /* NOTE: We never need to notify the main thread because of a
2845
     * deleted timeout event: all that could happen if we don't is
2846
     * that the dispatch loop might wake up too early.  But the
2847
     * point of notifying the main thread _is_ to wake up the
2848
     * dispatch loop early anyway, so we wouldn't gain anything by
2849
     * doing it.
2850
     */
2851
7.07M
    event_queue_remove_timeout(base, ev);
2852
7.07M
  }
2853
2854
35.3M
  if (ev->ev_flags & EVLIST_ACTIVE)
  Branch (2854:6): [True: 2.36M, False: 33.0M]
2855
2.36M
    event_queue_remove_active(base, event_to_event_callback(ev));
2856
33.0M
  else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
  Branch (2856:11): [True: 0, False: 33.0M]
2857
0
    event_queue_remove_active_later(base, event_to_event_callback(ev));
2858
2859
35.3M
  if (ev->ev_flags & EVLIST_INSERTED) {
  Branch (2859:6): [True: 9.45M, False: 25.9M]
2860
9.45M
    event_queue_remove_inserted(base, ev);
2861
9.45M
    if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
  Branch (2861:7): [True: 9.45M, False: 0]
2862
9.45M
      res = evmap_io_del_(base, ev->ev_fd, ev);
2863
0
    else
2864
0
      res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2865
9.45M
    if (res == 1) {
  Branch (2865:7): [True: 9.45M, False: 0]
2866
      /* evmap says we need to notify the main thread. */
2867
9.45M
      notify = 1;
2868
9.45M
      res = 0;
2869
9.45M
    }
2870
    /* If we do not have events, let's notify event base so it can
2871
     * exit without waiting */
2872
9.45M
    if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
  Branch (2872:7): [True: 13.2k, False: 9.44M]
  Branch (2872:34): [True: 13.2k, False: 0]
2873
13.2k
      notify = 1;
2874
9.45M
  }
2875
2876
  /* if we are not in the right thread, we need to wake up the loop */
2877
35.3M
  if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
  Branch (2877:6): [True: 35.3M, False: 0]
  Branch (2877:19): [True: 9.45M, False: 25.9M]
2878
11.0k
    evthread_notify_base(base);
2879
2880
35.3M
  event_debug_note_del_(ev);
2881
2882
  /* If the main thread is currently executing this event's callback,
2883
   * and we are not the main thread, then we want to wait until the
2884
   * callback is done before returning. That way, when this function
2885
   * returns, it will be safe to free the user-supplied argument.
2886
   */
2887
35.3M
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2888
35.3M
  if (blocking != EVENT_DEL_NOBLOCK &&
  Branch (2888:6): [True: 30.6M, False: 4.71M]
2889
35.3M
      base->current_event == event_to_event_callback(ev) &&
  Branch (2889:6): [True: 16.4M, False: 14.1M]
2890
35.3M
      !EVBASE_IN_THREAD(base) &&
2891
35.3M
      (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
  Branch (2891:7): [True: 0, False: 0]
  Branch (2891:38): [True: 0, False: 0]
2892
0
    ++base->current_event_waiters;
2893
0
    EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2894
0
  }
2895
35.3M
#endif
2896
2897
35.3M
  return (res);
2898
35.3M
}
2899
2900
void
2901
event_active(struct event *ev, int res, short ncalls)
2902
2.35M
{
2903
2.35M
  if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2904
0
    event_warnx("%s: event has no event_base set.", __func__);
2905
0
    return;
2906
0
  }
2907
2908
2.35M
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2909
2910
2.35M
  event_debug_assert_is_setup_(ev);
2911
2912
2.35M
  event_active_nolock_(ev, res, ncalls);
2913
2914
2.35M
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2915
2.35M
}
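A sketch of cross-thread activation (illustrative helpers; assumes the pthreads lock implementation from event2/thread.h, linked as libevent_pthreads): with locking enabled, event_active() takes th_base_lock and, when needed, wakes the loop through evthread_notify_base().

#include <event2/event.h>
#include <event2/thread.h>

static struct event_base *make_threaded_base(void)
{
  evthread_use_pthreads();   /* must be called before the base is created */
  return event_base_new();
}

static void wake_from_other_thread(struct event *notify_ev)
{
  event_active(notify_ev, EV_TIMEOUT, 0);   /* safe from a non-loop thread */
}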
2916
2917
2918
void
2919
event_active_nolock_(struct event *ev, int res, short ncalls)
2920
14.1M
{
2921
14.1M
  struct event_base *base;
2922
2923
14.1M
  event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2924
14.1M
    ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2925
2926
14.1M
  base = ev->ev_base;
2927
14.1M
  EVENT_BASE_ASSERT_LOCKED(base);
2928
2929
14.1M
  if (ev->ev_flags & EVLIST_FINALIZING) {
  Branch (2929:6): [True: 0, False: 14.1M]
2930
    /* XXXX debug */
2931
0
    return;
2932
0
  }
2933
2934
14.1M
  switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2935
0
  default:
  Branch (2935:2): [True: 0, False: 14.1M]
2936
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
  Branch (2936:2): [True: 0, False: 14.1M]
2937
0
    EVUTIL_ASSERT(0);
2938
0
    break;
2939
0
  case EVLIST_ACTIVE:
  Branch (2939:2): [True: 0, False: 14.1M]
2940
    /* We get different kinds of events, add them together */
2941
0
    ev->ev_res |= res;
2942
0
    return;
2943
0
  case EVLIST_ACTIVE_LATER:
  Branch (2943:2): [True: 0, False: 14.1M]
2944
0
    ev->ev_res |= res;
2945
0
    break;
2946
14.1M
  case 0:
  Branch (2946:2): [True: 14.1M, False: 0]
2947
14.1M
    ev->ev_res = res;
2948
14.1M
    break;
2949
14.1M
  }
2950
2951
14.1M
  if (ev->ev_pri < base->event_running_priority)
  Branch (2951:6): [True: 0, False: 14.1M]
2952
0
    base->event_continue = 1;
2953
2954
14.1M
  if (ev->ev_events & EV_SIGNAL) {
  Branch (2954:6): [True: 0, False: 14.1M]
2955
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
2956
0
    if (base->current_event == event_to_event_callback(ev) &&
  Branch (2956:7): [True: 0, False: 0]
2957
0
        !EVBASE_IN_THREAD(base)) {
2958
0
      ++base->current_event_waiters;
2959
0
      EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2960
0
    }
2961
0
#endif
2962
0
    ev->ev_ncalls = ncalls;
2963
0
    ev->ev_pncalls = NULL;
2964
0
  }
2965
2966
14.1M
  event_callback_activate_nolock_(base, event_to_event_callback(ev));
2967
14.1M
}
2968
2969
void
2970
event_active_later_(struct event *ev, int res)
2971
0
{
2972
0
  EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2973
0
  event_active_later_nolock_(ev, res);
2974
0
  EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2975
0
}
2976
2977
void
2978
event_active_later_nolock_(struct event *ev, int res)
2979
0
{
2980
0
  struct event_base *base = ev->ev_base;
2981
0
  EVENT_BASE_ASSERT_LOCKED(base);
2982
2983
0
  if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
  Branch (2983:6): [True: 0, False: 0]
2984
    /* We get different kinds of events, add them together */
2985
0
    ev->ev_res |= res;
2986
0
    return;
2987
0
  }
2988
2989
0
  ev->ev_res = res;
2990
2991
0
  event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2992
0
}
2993
2994
int
2995
event_callback_activate_(struct event_base *base,
2996
    struct event_callback *evcb)
2997
0
{
2998
0
  int r;
2999
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3000
0
  r = event_callback_activate_nolock_(base, evcb);
3001
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3002
0
  return r;
3003
0
}
3004
3005
int
3006
event_callback_activate_nolock_(struct event_base *base,
3007
    struct event_callback *evcb)
3008
16.5M
{
3009
16.5M
  int r = 1;
3010
3011
16.5M
  if (evcb->evcb_flags & EVLIST_FINALIZING)
  Branch (3011:6): [True: 0, False: 16.5M]
3012
0
    return 0;
3013
3014
16.5M
  switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3015
0
  default:
  Branch (3015:2): [True: 0, False: 16.5M]
3016
0
    EVUTIL_ASSERT(0);
3017
0
    EVUTIL_FALLTHROUGH;
3018
0
  case EVLIST_ACTIVE_LATER:
  Branch (3018:2): [True: 0, False: 16.5M]
3019
0
    event_queue_remove_active_later(base, evcb);
3020
0
    r = 0;
3021
0
    break;
3022
0
  case EVLIST_ACTIVE:
  Branch (3022:2): [True: 0, False: 16.5M]
3023
0
    return 0;
3024
16.5M
  case 0:
  Branch (3024:2): [True: 16.5M, False: 0]
3025
16.5M
    break;
3026
16.5M
  }
3027
3028
16.5M
  event_queue_insert_active(base, evcb);
3029
3030
16.5M
  if (EVBASE_NEED_NOTIFY(base))
3031
2.36M
    evthread_notify_base(base);
3032
3033
16.5M
  return r;
3034
16.5M
}
3035
3036
int
3037
event_callback_activate_later_nolock_(struct event_base *base,
3038
    struct event_callback *evcb)
3039
0
{
3040
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
  Branch (3040:6): [True: 0, False: 0]
3041
0
    return 0;
3042
3043
0
  event_queue_insert_active_later(base, evcb);
3044
0
  if (EVBASE_NEED_NOTIFY(base))
3045
0
    evthread_notify_base(base);
3046
0
  return 1;
3047
0
}
3048
3049
void
3050
event_callback_init_(struct event_base *base,
3051
    struct event_callback *cb)
3052
0
{
3053
0
  memset(cb, 0, sizeof(*cb));
3054
0
  cb->evcb_pri = base->nactivequeues - 1;
3055
0
}
3056
3057
int
3058
event_callback_cancel_(struct event_base *base,
3059
    struct event_callback *evcb)
3060
4.71M
{
3061
4.71M
  int r;
3062
4.71M
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3063
4.71M
  r = event_callback_cancel_nolock_(base, evcb, 0);
3064
4.71M
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3065
4.71M
  return r;
3066
4.71M
}
3067
3068
int
3069
event_callback_cancel_nolock_(struct event_base *base,
3070
    struct event_callback *evcb, int even_if_finalizing)
3071
9.43M
{
3072
9.43M
  if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
  Branch (3072:6): [True: 0, False: 9.43M]
  Branch (3072:48): [True: 0, False: 0]
3073
0
    return 0;
3074
3075
9.43M
  if (evcb->evcb_flags & EVLIST_INIT)
  Branch (3075:6): [True: 2.35M, False: 7.07M]
3076
2.35M
    return event_del_nolock_(event_callback_to_event(evcb),
3077
2.35M
        even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
  Branch (3077:7): [True: 0, False: 2.35M]
3078
3079
7.07M
  switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3080
0
  default:
  Branch (3080:2): [True: 0, False: 7.07M]
3081
0
  case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
  Branch (3081:2): [True: 0, False: 7.07M]
3082
0
    EVUTIL_ASSERT(0);
3083
0
    break;
3084
0
  case EVLIST_ACTIVE:
  Branch (3084:2): [True: 0, False: 7.07M]
3085
    /* We get different kinds of events, add them together */
3086
0
    event_queue_remove_active(base, evcb);
3087
0
    return 0;
3088
0
  case EVLIST_ACTIVE_LATER:
  Branch (3088:2): [True: 0, False: 7.07M]
3089
0
    event_queue_remove_active_later(base, evcb);
3090
0
    break;
3091
7.07M
  case 0:
  Branch (3091:2): [True: 7.07M, False: 0]
3092
7.07M
    break;
3093
7.07M
  }
3094
3095
7.07M
  return 0;
3096
7.07M
}
3097
3098
void
3099
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3100
4.71M
{
3101
4.71M
  memset(cb, 0, sizeof(*cb));
3102
4.71M
  cb->evcb_cb_union.evcb_selfcb = fn;
3103
4.71M
  cb->evcb_arg = arg;
3104
4.71M
  cb->evcb_pri = priority;
3105
4.71M
  cb->evcb_closure = EV_CLOSURE_CB_SELF;
3106
4.71M
}
3107
3108
void
3109
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3110
0
{
3111
0
  cb->evcb_pri = priority;
3112
0
}
3113
3114
void
3115
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3116
4.71M
{
3117
4.71M
  if (!base)
  Branch (3117:6): [True: 0, False: 4.71M]
3118
0
    base = current_base;
3119
4.71M
  event_callback_cancel_(base, cb);
3120
4.71M
}
3121
3122
0
#define MAX_DEFERREDS_QUEUED 32
3123
int
3124
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3125
0
{
3126
0
  int r = 1;
3127
0
  if (!base)
  Branch (3127:6): [True: 0, False: 0]
3128
0
    base = current_base;
3129
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3130
0
  if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
  Branch (3130:6): [True: 0, False: 0]
3131
0
    r = event_callback_activate_later_nolock_(base, cb);
3132
0
  } else {
3133
0
    r = event_callback_activate_nolock_(base, cb);
3134
0
    if (r) {
  Branch (3134:7): [True: 0, False: 0]
3135
0
      ++base->n_deferreds_queued;
3136
0
    }
3137
0
  }
3138
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3139
0
  return r;
3140
0
}
3141
3142
static int
3143
timeout_next(struct event_base *base, struct timeval **tv_p)
3144
11.8M
{
3145
  /* Caller must hold th_base_lock */
3146
11.8M
  struct timeval now;
3147
11.8M
  struct event *ev;
3148
11.8M
  struct timeval *tv = *tv_p;
3149
11.8M
  int res = 0;
3150
3151
11.8M
  ev = min_heap_top_(&base->timeheap);
3152
3153
11.8M
  if (ev == NULL) {
  Branch (3153:6): [True: 4.73M, False: 7.06M]
3154
    /* if no time-based events are active, wait for I/O */
3155
4.73M
    *tv_p = NULL;
3156
4.73M
    goto out;
3157
4.73M
  }
3158
3159
7.06M
  if (gettime(base, &now) == -1) {
  Branch (3159:6): [True: 0, False: 7.06M]
3160
0
    res = -1;
3161
0
    goto out;
3162
0
  }
3163
3164
7.06M
  if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3165
0
    evutil_timerclear(tv);
3166
0
    goto out;
3167
0
  }
3168
3169
7.06M
  evutil_timersub(&ev->ev_timeout, &now, tv);
  Branch (3169:2): [True: 0, False: 7.06M]
  Branch (3169:2): [Folded - Ignored]
3170
3171
7.06M
  EVUTIL_ASSERT(tv->tv_sec >= 0);
3172
7.06M
  EVUTIL_ASSERT(tv->tv_usec >= 0);
3173
7.06M
  event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3174
3175
11.8M
out:
3176
11.8M
  return (res);
3177
7.06M
}
3178
3179
/* Activate every event whose timeout has elapsed. */
3180
static void
3181
timeout_process(struct event_base *base)
3182
11.7M
{
3183
  /* Caller must hold lock. */
3184
11.7M
  struct timeval now;
3185
11.7M
  struct event *ev;
3186
3187
11.7M
  if (min_heap_empty_(&base->timeheap)) {
  Branch (3187:6): [True: 4.72M, False: 7.06M]
3188
4.72M
    return;
3189
4.72M
  }
3190
3191
7.06M
  gettime(base, &now);
3192
3193
7.06M
  while ((ev = min_heap_top_(&base->timeheap))) {
  Branch (3193:9): [True: 7.06M, False: 0]
3194
7.06M
    if (evutil_timercmp(&ev->ev_timeout, &now, >))
3195
7.06M
      break;
3196
3197
    /* delete this event from the I/O queues */
3198
0
    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3199
3200
0
    event_debug(("timeout_process: event: %p, call %p",
3201
0
       ev, ev->ev_callback));
3202
0
    event_active_nolock_(ev, EV_TIMEOUT, 1);
3203
0
  }
3204
7.06M
}
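
For context, a minimal sketch of how an event ends up in the min-heap that timeout_process() drains (illustrative only, not part of the instrumented event.c; on_timeout() and schedule() are hypothetical names):

#include <event2/event.h>

static void on_timeout(evutil_socket_t fd, short what, void *arg)
{
  /* invoked with what == EV_TIMEOUT once the interval elapses */
}

static void schedule(struct event_base *base)
{
  struct timeval tv = { 2, 500000 };              /* 2.5 seconds */
  struct event *ev = evtimer_new(base, on_timeout, NULL);
  evtimer_add(ev, &tv);                           /* pushes ev onto the timeout min-heap */
  /* the caller remains responsible for event_free(ev) later */
}
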
3205
3206
#ifndef MAX
3207
61.3M
#define MAX(a,b) (((a)>(b))?(a):(b))
3208
#endif
3209
3210
61.3M
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3211
3212
/* These are a fancy way to spell
3213
     if (~flags & EVLIST_INTERNAL)
3214
         base->event_count--/++;
3215
*/
3216
#define DECR_EVENT_COUNT(base,flags) \
3217
44.8M
  ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3218
44.8M
#define INCR_EVENT_COUNT(base,flags) do {         \
3219
44.8M
  ((base)->event_count += !((flags) & EVLIST_INTERNAL));     \
3220
44.8M
  MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);   \
3221
44.8M
} while (0)
3222
3223
static void
3224
event_queue_remove_inserted(struct event_base *base, struct event *ev)
3225
9.45M
{
3226
9.45M
  EVENT_BASE_ASSERT_LOCKED(base);
3227
9.45M
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3228
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3229
0
        ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3230
0
    return;
3231
0
  }
3232
9.45M
  DECR_EVENT_COUNT(base, ev->ev_flags);
3233
9.45M
  ev->ev_flags &= ~EVLIST_INSERTED;
3234
9.45M
}
3235
static void
3236
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3237
16.5M
{
3238
16.5M
  EVENT_BASE_ASSERT_LOCKED(base);
3239
16.5M
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3240
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3241
0
         evcb, EVLIST_ACTIVE);
3242
0
    return;
3243
0
  }
3244
16.5M
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3245
16.5M
  evcb->evcb_flags &= ~EVLIST_ACTIVE;
3246
16.5M
  base->event_count_active--;
3247
3248
16.5M
  TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3249
16.5M
      evcb, evcb_active_next);
3250
16.5M
}
3251
static void
3252
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3253
0
{
3254
0
  EVENT_BASE_ASSERT_LOCKED(base);
3255
0
  if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3256
0
    event_errx(1, "%s: %p not on queue %x", __func__,
3257
0
         evcb, EVLIST_ACTIVE_LATER);
3258
0
    return;
3259
0
  }
3260
0
  DECR_EVENT_COUNT(base, evcb->evcb_flags);
3261
0
  evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3262
0
  base->event_count_active--;
3263
3264
0
  TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3265
0
}
3266
static void
3267
event_queue_remove_timeout(struct event_base *base, struct event *ev)
3268
18.8M
{
3269
18.8M
  EVENT_BASE_ASSERT_LOCKED(base);
3270
18.8M
  if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3271
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3272
0
        ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3273
0
    return;
3274
0
  }
3275
18.8M
  DECR_EVENT_COUNT(base, ev->ev_flags);
3276
18.8M
  ev->ev_flags &= ~EVLIST_TIMEOUT;
3277
3278
18.8M
  if (is_common_timeout(&ev->ev_timeout, base)) {
  Branch (3278:6): [True: 0, False: 18.8M]
3279
0
    struct common_timeout_list *ctl =
3280
0
        get_common_timeout_list(base, &ev->ev_timeout);
3281
0
    TAILQ_REMOVE(&ctl->events, ev,
3282
0
        ev_timeout_pos.ev_next_with_common_timeout);
3283
18.8M
  } else {
3284
18.8M
    min_heap_erase_(&base->timeheap, ev);
3285
18.8M
  }
3286
18.8M
}
3287
3288
#ifdef USE_REINSERT_TIMEOUT
3289
/* Remove and reinsert 'ev' into the timeout queue. */
3290
static void
3291
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3292
    int was_common, int is_common, int old_timeout_idx)
3293
{
3294
  struct common_timeout_list *ctl;
3295
  if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3296
    event_queue_insert_timeout(base, ev);
3297
    return;
3298
  }
3299
3300
  switch ((was_common<<1) | is_common) {
3301
  case 3: /* Changing from one common timeout to another */
3302
    ctl = base->common_timeout_queues[old_timeout_idx];
3303
    TAILQ_REMOVE(&ctl->events, ev,
3304
        ev_timeout_pos.ev_next_with_common_timeout);
3305
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3306
    insert_common_timeout_inorder(ctl, ev);
3307
    break;
3308
  case 2: /* Was common; is no longer common */
3309
    ctl = base->common_timeout_queues[old_timeout_idx];
3310
    TAILQ_REMOVE(&ctl->events, ev,
3311
        ev_timeout_pos.ev_next_with_common_timeout);
3312
    min_heap_push_(&base->timeheap, ev);
3313
    break;
3314
  case 1: /* Wasn't common; has become common. */
3315
    min_heap_erase_(&base->timeheap, ev);
3316
    ctl = get_common_timeout_list(base, &ev->ev_timeout);
3317
    insert_common_timeout_inorder(ctl, ev);
3318
    break;
3319
  case 0: /* was in heap; is still on heap. */
3320
    min_heap_adjust_(&base->timeheap, ev);
3321
    break;
3322
  default:
3323
    EVUTIL_ASSERT(0); /* unreachable */
3324
    break;
3325
  }
3326
}
3327
#endif
3328
3329
/* Add 'ev' to the common timeout list 'ctl', keeping the list sorted by timeout. */
3330
static void
3331
insert_common_timeout_inorder(struct common_timeout_list *ctl,
3332
    struct event *ev)
3333
0
{
3334
0
  struct event *e;
3335
  /* By all logic, we should just be able to append 'ev' to the end of
3336
   * ctl->events, since the timeout on each 'ev' is set to {the common
3337
   * timeout} + {the time when we add the event}, and so the events
3338
   * should arrive in order of their timeouts.  But just in case
3339
   * there's some wacky threading issue going on, we do a search from
3340
   * the end of 'ctl->events' to find the right insertion point.
3341
   */
3342
0
  TAILQ_FOREACH_REVERSE(e, &ctl->events,
3343
0
      event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3344
    /* This timercmp is a little sneaky, since both ev and e have
3345
     * magic values in tv_usec.  Fortunately, they ought to have
3346
     * the _same_ magic values in tv_usec.  Let's assert for that.
3347
     */
3348
0
    EVUTIL_ASSERT(
3349
0
      is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3350
0
    if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3351
0
      TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3352
0
          ev_timeout_pos.ev_next_with_common_timeout);
3353
0
      return;
3354
0
    }
3355
0
  }
3356
0
  TAILQ_INSERT_HEAD(&ctl->events, ev,
3357
0
      ev_timeout_pos.ev_next_with_common_timeout);
3358
0
}
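
A minimal sketch of how a common timeout is requested through the public API (illustrative only, not part of the instrumented event.c; use_common_timeout() is a hypothetical helper): event_add() then routes the event into a common_timeout_list like the one maintained above instead of the min-heap.

#include <event2/event.h>

static void use_common_timeout(struct event_base *base, struct event *ev)
{
  struct timeval ten_sec = { 10, 0 };
  const struct timeval *common =
      event_base_init_common_timeout(base, &ten_sec);
  /* events added with 'common' share one ordered queue, which keeps
   * insertion cheap when many identical timeouts are pending */
  event_add(ev, common);
}
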
3359
3360
static void
3361
event_queue_insert_inserted(struct event_base *base, struct event *ev)
3362
9.45M
{
3363
9.45M
  EVENT_BASE_ASSERT_LOCKED(base);
3364
3365
9.45M
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3366
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3367
0
        ev, EV_SOCK_ARG(ev->ev_fd));
3368
0
    return;
3369
0
  }
3370
3371
9.45M
  INCR_EVENT_COUNT(base, ev->ev_flags);
3372
3373
9.45M
  ev->ev_flags |= EVLIST_INSERTED;
3374
9.45M
}
3375
3376
static void
3377
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3378
16.5M
{
3379
16.5M
  EVENT_BASE_ASSERT_LOCKED(base);
3380
3381
16.5M
  if (evcb->evcb_flags & EVLIST_ACTIVE) {
  Branch (3381:6): [True: 0, False: 16.5M]
3382
    /* Double insertion is possible for active events */
3383
0
    return;
3384
0
  }
3385
3386
16.5M
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3387
3388
16.5M
  evcb->evcb_flags |= EVLIST_ACTIVE;
3389
3390
16.5M
  base->event_count_active++;
3391
16.5M
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3392
16.5M
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3393
16.5M
  TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3394
16.5M
      evcb, evcb_active_next);
3395
16.5M
}
3396
3397
static void
3398
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3399
0
{
3400
0
  EVENT_BASE_ASSERT_LOCKED(base);
3401
0
  if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
  Branch (3401:6): [True: 0, False: 0]
3402
    /* Double insertion is possible */
3403
0
    return;
3404
0
  }
3405
3406
0
  INCR_EVENT_COUNT(base, evcb->evcb_flags);
3407
0
  evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3408
0
  base->event_count_active++;
3409
0
  MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3410
0
  EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3411
0
  TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3412
0
}
3413
3414
static void
3415
event_queue_insert_timeout(struct event_base *base, struct event *ev)
3416
18.8M
{
3417
18.8M
  EVENT_BASE_ASSERT_LOCKED(base);
3418
3419
18.8M
  if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3420
0
    event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3421
0
        ev, EV_SOCK_ARG(ev->ev_fd));
3422
0
    return;
3423
0
  }
3424
3425
18.8M
  INCR_EVENT_COUNT(base, ev->ev_flags);
3426
3427
18.8M
  ev->ev_flags |= EVLIST_TIMEOUT;
3428
3429
18.8M
  if (is_common_timeout(&ev->ev_timeout, base)) {
  Branch (3429:6): [True: 0, False: 18.8M]
3430
0
    struct common_timeout_list *ctl =
3431
0
        get_common_timeout_list(base, &ev->ev_timeout);
3432
0
    insert_common_timeout_inorder(ctl, ev);
3433
18.8M
  } else {
3434
18.8M
    min_heap_push_(&base->timeheap, ev);
3435
18.8M
  }
3436
18.8M
}
3437
3438
static void
3439
event_queue_make_later_events_active(struct event_base *base)
3440
11.7M
{
3441
11.7M
  struct event_callback *evcb;
3442
11.7M
  EVENT_BASE_ASSERT_LOCKED(base);
3443
3444
11.7M
  while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
  Branch (3444:9): [True: 0, False: 11.7M]
3445
0
    TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3446
0
    evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3447
0
    EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3448
0
    TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3449
0
    base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3450
0
  }
3451
11.7M
}
3452
3453
/* Functions for debugging */
3454
3455
const char *
3456
event_get_version(void)
3457
0
{
3458
0
  return (EVENT__VERSION);
3459
0
}
3460
3461
ev_uint32_t
3462
event_get_version_number(void)
3463
9.43M
{
3464
9.43M
  return (EVENT__NUMERIC_VERSION);
3465
9.43M
}
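
A minimal sketch comparing the compile-time version against the library linked at run time (illustrative only, not part of the instrumented event.c; check_version() is a hypothetical helper):

#include <stdio.h>
#include <event2/event.h>

static int check_version(void)
{
  if (event_get_version_number() != LIBEVENT_VERSION_NUMBER) {
    /* a mismatch is not necessarily fatal, but worth reporting */
    fprintf(stderr, "built against %s, running %s\n",
        LIBEVENT_VERSION, event_get_version());
    return -1;
  }
  return 0;
}
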
3466
3467
/*
3468
 * No thread-safe interface needed - the information should be the same
3469
 * for all threads.
3470
 */
3471
3472
const char *
3473
event_get_method(void)
3474
0
{
3475
0
  return (current_base->evsel->name);
3476
0
}
3477
3478
#ifndef EVENT__DISABLE_MM_REPLACEMENT
3479
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3480
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3481
static void (*mm_free_fn_)(void *p) = NULL;
3482
3483
void *
3484
event_mm_malloc_(size_t sz)
3485
30.7M
{
3486
30.7M
  if (sz == 0)
  Branch (3486:6): [True: 0, False: 30.7M]
3487
0
    return NULL;
3488
3489
30.7M
  if (mm_malloc_fn_)
  Branch (3489:6): [True: 0, False: 30.7M]
3490
0
    return mm_malloc_fn_(sz);
3491
30.7M
  else
3492
30.7M
    return malloc(sz);
3493
30.7M
}
3494
3495
void *
3496
event_mm_calloc_(size_t count, size_t size)
3497
54.3M
{
3498
54.3M
  if (count == 0 || size == 0)
  Branch (3498:6): [True: 0, False: 54.3M]
  Branch (3498:20): [True: 0, False: 54.3M]
3499
0
    return NULL;
3500
3501
54.3M
  if (mm_malloc_fn_) {
  Branch (3501:6): [True: 0, False: 54.3M]
3502
0
    size_t sz = count * size;
3503
0
    void *p = NULL;
3504
0
    if (count > EV_SIZE_MAX / size)
  Branch (3504:7): [True: 0, False: 0]
3505
0
      goto error;
3506
0
    p = mm_malloc_fn_(sz);
3507
0
    if (p)
  Branch (3507:7): [True: 0, False: 0]
3508
0
      return memset(p, 0, sz);
3509
54.3M
  } else {
3510
54.3M
    void *p = calloc(count, size);
3511
#ifdef _WIN32
3512
    /* Windows calloc doesn't reliably set ENOMEM */
3513
    if (p == NULL)
3514
      goto error;
3515
#endif
3516
54.3M
    return p;
3517
54.3M
  }
3518
3519
0
error:
3520
0
  errno = ENOMEM;
3521
0
  return NULL;
3522
54.3M
}
3523
3524
char *
3525
event_mm_strdup_(const char *str)
3526
54.2M
{
3527
54.2M
  if (!str) {
  Branch (3527:6): [True: 0, False: 54.2M]
3528
0
    errno = EINVAL;
3529
0
    return NULL;
3530
0
  }
3531
3532
54.2M
  if (mm_malloc_fn_) {
  Branch (3532:6): [True: 0, False: 54.2M]
3533
0
    size_t ln = strlen(str);
3534
0
    void *p = NULL;
3535
0
    if (ln == EV_SIZE_MAX)
  Branch (3535:7): [True: 0, False: 0]
3536
0
      goto error;
3537
0
    p = mm_malloc_fn_(ln+1);
3538
0
    if (p)
  Branch (3538:7): [True: 0, False: 0]
3539
0
      return memcpy(p, str, ln+1);
3540
0
  } else
3541
#ifdef _WIN32
3542
    return _strdup(str);
3543
#else
3544
54.2M
    return strdup(str);
3545
0
#endif
3546
3547
0
error:
3548
0
  errno = ENOMEM;
3549
0
  return NULL;
3550
54.2M
}
3551
3552
void *
3553
event_mm_realloc_(void *ptr, size_t sz)
3554
33.2k
{
3555
33.2k
  if (mm_realloc_fn_)
  Branch (3555:6): [True: 0, False: 33.2k]
3556
0
    return mm_realloc_fn_(ptr, sz);
3557
33.2k
  else
3558
33.2k
    return realloc(ptr, sz);
3559
33.2k
}
3560
3561
void
3562
event_mm_free_(void *ptr)
3563
139M
{
3564
139M
  if (mm_free_fn_)
  Branch (3564:6): [True: 0, False: 139M]
3565
0
    mm_free_fn_(ptr);
3566
139M
  else
3567
139M
    free(ptr);
3568
139M
}
3569
3570
void
3571
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3572
      void *(*realloc_fn)(void *ptr, size_t sz),
3573
      void (*free_fn)(void *ptr))
3574
0
{
3575
0
  mm_malloc_fn_ = malloc_fn;
3576
0
  mm_realloc_fn_ = realloc_fn;
3577
0
  mm_free_fn_ = free_fn;
3578
0
}
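
A minimal sketch of installing replacement allocators (illustrative only, not part of the instrumented event.c; the counting wrappers are hypothetical and only roughly track live allocations). This must happen before any other libevent call, or allocations and frees would go through mismatched functions.

#include <stdlib.h>
#include <event2/event.h>

static size_t live_allocs;

static void *counting_malloc(size_t sz) { live_allocs++; return malloc(sz); }
static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void counting_free(void *p) { if (p) live_allocs--; free(p); }

static void install_allocator(void)
{
  event_set_mem_functions(counting_malloc, counting_realloc, counting_free);
}
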
3579
#endif
3580
3581
#ifdef EVENT__HAVE_EVENTFD
3582
static void
3583
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3584
2.36M
{
3585
2.36M
  ev_uint64_t msg;
3586
2.36M
  ev_ssize_t r;
3587
2.36M
  struct event_base *base = arg;
3588
3589
2.36M
  r = read(fd, (void*) &msg, sizeof(msg));
3590
2.36M
  if (r<0 && errno != EAGAIN) {
  Branch (3590:6): [True: 0, False: 2.36M]
  Branch (3590:13): [True: 0, False: 0]
3591
0
    event_sock_warn(fd, "Error reading from eventfd");
3592
0
  }
3593
2.36M
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3594
2.36M
  base->is_notify_pending = 0;
3595
2.36M
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3596
2.36M
}
3597
#endif
3598
3599
static void
3600
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3601
0
{
3602
0
  unsigned char buf[1024];
3603
0
  struct event_base *base = arg;
3604
#ifdef _WIN32
3605
  while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3606
    ;
3607
#else
3608
0
  while (read(fd, (char*)buf, sizeof(buf)) > 0)
  Branch (3608:9): [True: 0, False: 0]
3609
0
    ;
3610
0
#endif
3611
3612
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3613
0
  base->is_notify_pending = 0;
3614
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3615
0
}
3616
3617
int
3618
evthread_make_base_notifiable(struct event_base *base)
3619
11.0k
{
3620
11.0k
  int r;
3621
11.0k
  if (!base)
  Branch (3621:6): [True: 0, False: 11.0k]
3622
0
    return -1;
3623
3624
11.0k
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3625
11.0k
  r = evthread_make_base_notifiable_nolock_(base);
3626
11.0k
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3627
11.0k
  return r;
3628
11.0k
}
3629
3630
static int
3631
evthread_make_base_notifiable_nolock_(struct event_base *base)
3632
11.0k
{
3633
11.0k
  void (*cb)(evutil_socket_t, short, void *);
3634
11.0k
  int (*notify)(struct event_base *);
3635
3636
11.0k
  if (base->th_notify_fn != NULL) {
  Branch (3636:6): [True: 0, False: 11.0k]
3637
    /* The base is already notifiable: we're doing fine. */
3638
0
    return 0;
3639
0
  }
3640
3641
#if defined(EVENT__HAVE_WORKING_KQUEUE)
3642
  if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3643
    base->th_notify_fn = event_kq_notify_base_;
3644
    /* No need to add an event here; the backend can wake
3645
     * itself up just fine. */
3646
    return 0;
3647
  }
3648
#endif
3649
3650
11.0k
#ifdef EVENT__HAVE_EVENTFD
3651
11.0k
  base->th_notify_fd[0] = evutil_eventfd_(0,
3652
11.0k
      EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3653
11.0k
  if (base->th_notify_fd[0] >= 0) {
  Branch (3653:6): [True: 11.0k, False: 0]
3654
11.0k
    base->th_notify_fd[1] = -1;
3655
11.0k
    notify = evthread_notify_base_eventfd;
3656
11.0k
    cb = evthread_notify_drain_eventfd;
3657
11.0k
  } else
3658
0
#endif
3659
0
  if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
  Branch (3659:6): [True: 0, False: 0]
3660
0
    notify = evthread_notify_base_default;
3661
0
    cb = evthread_notify_drain_default;
3662
0
  } else {
3663
0
    return -1;
3664
0
  }
3665
3666
11.0k
  base->th_notify_fn = notify;
3667
3668
  /* prepare an event that we can use for wakeup */
3669
11.0k
  event_assign(&base->th_notify, base, base->th_notify_fd[0],
3670
11.0k
         EV_READ|EV_PERSIST, cb, base);
3671
3672
  /* we need to mark this as an internal event */
3673
11.0k
  base->th_notify.ev_flags |= EVLIST_INTERNAL;
3674
11.0k
  event_priority_set(&base->th_notify, 0);
3675
3676
11.0k
  return event_add_nolock_(&base->th_notify, NULL, 0);
3677
11.0k
}
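
A minimal sketch of the usual way a base ends up notifiable (illustrative only, not part of the instrumented event.c; make_threaded_base() is a hypothetical helper): once pthread locking is enabled, the library sets up the eventfd or pipe path above so that cross-thread event_add()/event_active() calls can wake the loop.

#include <event2/event.h>
#include <event2/thread.h>

static struct event_base *make_threaded_base(void)
{
  if (evthread_use_pthreads() < 0)   /* requires linking -levent_pthreads */
    return NULL;
  return event_base_new();
}
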
3678
3679
int
3680
event_base_foreach_event_nolock_(struct event_base *base,
3681
    event_base_foreach_event_cb fn, void *arg)
3682
0
{
3683
0
  int r, i;
3684
0
  unsigned u;
3685
0
  struct event *ev;
3686
3687
  /* Start out with all the EVLIST_INSERTED events. */
3688
0
  if ((r = evmap_foreach_event_(base, fn, arg)))
  Branch (3688:6): [True: 0, False: 0]
3689
0
    return r;
3690
3691
  /* Okay, now we deal with those events that have timeouts and are in
3692
   * the min-heap. */
3693
0
  for (u = 0; u < base->timeheap.n; ++u) {
  Branch (3693:14): [True: 0, False: 0]
3694
0
    ev = base->timeheap.p[u];
3695
0
    if (ev->ev_flags & EVLIST_INSERTED) {
  Branch (3695:7): [True: 0, False: 0]
3696
      /* we already processed this one */
3697
0
      continue;
3698
0
    }
3699
0
    if ((r = fn(base, ev, arg)))
  Branch (3699:7): [True: 0, False: 0]
3700
0
      return r;
3701
0
  }
3702
3703
  /* Now for the events in one of the common timeout queues,
3704
   * rather than in the min-heap. */
3705
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
  Branch (3705:14): [True: 0, False: 0]
3706
0
    struct common_timeout_list *ctl =
3707
0
        base->common_timeout_queues[i];
3708
0
    TAILQ_FOREACH(ev, &ctl->events,
3709
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3710
0
      if (ev->ev_flags & EVLIST_INSERTED) {
  Branch (3710:8): [True: 0, False: 0]
3711
        /* we already processed this one */
3712
0
        continue;
3713
0
      }
3714
0
      if ((r = fn(base, ev, arg)))
  Branch (3714:8): [True: 0, False: 0]
3715
0
        return r;
3716
0
    }
3717
0
  }
3718
3719
  /* Finally, we deal with all the active events that we haven't touched
3720
   * yet. */
3721
0
  for (i = 0; i < base->nactivequeues; ++i) {
  Branch (3721:14): [True: 0, False: 0]
3722
0
    struct event_callback *evcb;
3723
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3724
0
      if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
  Branch (3724:8): [True: 0, False: 0]
3725
        /* This isn't an event (evlist_init clear), or
3726
         * we already processed it. (inserted or
3727
       * timeout set). */
3728
0
        continue;
3729
0
      }
3730
0
      ev = event_callback_to_event(evcb);
3731
0
      if ((r = fn(base, ev, arg)))
  Branch (3731:8): [True: 0, False: 0]
3732
0
        return r;
3733
0
    }
3734
0
  }
3735
3736
0
  return 0;
3737
0
}
3738
3739
/* Helper for event_base_dump_events: called on each event in the event base;
3740
 * dumps only the inserted events. */
3741
static int
3742
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3743
0
{
3744
0
  FILE *output = arg;
3745
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
  Branch (3745:22): [True: 0, False: 0]
3746
0
      "sig" : "fd ";
3747
3748
0
  if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
  Branch (3748:6): [True: 0, False: 0]
3749
0
    return 0;
3750
3751
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3752
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3753
0
      (e->ev_events&EV_READ)?" Read":"",
  Branch (3753:6): [True: 0, False: 0]
3754
0
      (e->ev_events&EV_WRITE)?" Write":"",
  Branch (3754:6): [True: 0, False: 0]
3755
0
      (e->ev_events&EV_CLOSED)?" EOF":"",
  Branch (3755:6): [True: 0, False: 0]
3756
0
      (e->ev_events&EV_SIGNAL)?" Signal":"",
  Branch (3756:6): [True: 0, False: 0]
3757
0
      (e->ev_events&EV_PERSIST)?" Persist":"",
  Branch (3757:6): [True: 0, False: 0]
3758
0
      (e->ev_events&EV_ET)?" ET":"",
  Branch (3758:6): [True: 0, False: 0]
3759
0
      (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
  Branch (3759:6): [True: 0, False: 0]
3760
0
  if (e->ev_flags & EVLIST_TIMEOUT) {
  Branch (3760:6): [True: 0, False: 0]
3761
0
    struct timeval tv;
3762
0
    tv.tv_sec = e->ev_timeout.tv_sec;
3763
0
    tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3764
0
    evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
  Branch (3764:3): [True: 0, False: 0]
  Branch (3764:3): [Folded - Ignored]
3765
0
    fprintf(output, " Timeout=%ld.%06d",
3766
0
        (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3767
0
  }
3768
0
  fputc('\n', output);
3769
3770
0
  return 0;
3771
0
}
3772
3773
/* Helper for event_base_dump_events: called on each event in the event base;
3774
 * dumps only the active events. */
3775
static int
3776
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3777
0
{
3778
0
  FILE *output = arg;
3779
0
  const char *gloss = (e->ev_events & EV_SIGNAL) ?
  Branch (3779:22): [True: 0, False: 0]
3780
0
      "sig" : "fd ";
3781
3782
0
  if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
  Branch (3782:6): [True: 0, False: 0]
3783
0
    return 0;
3784
3785
0
  fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3786
0
      (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3787
0
      (e->ev_res&EV_READ)?" Read":"",
  Branch (3787:6): [True: 0, False: 0]
3788
0
      (e->ev_res&EV_WRITE)?" Write":"",
  Branch (3788:6): [True: 0, False: 0]
3789
0
      (e->ev_res&EV_CLOSED)?" EOF":"",
  Branch (3789:6): [True: 0, False: 0]
3790
0
      (e->ev_res&EV_SIGNAL)?" Signal":"",
  Branch (3790:6): [True: 0, False: 0]
3791
0
      (e->ev_res&EV_TIMEOUT)?" Timeout":"",
  Branch (3791:6): [True: 0, False: 0]
3792
0
      (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
  Branch (3792:6): [True: 0, False: 0]
3793
0
      (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
  Branch (3793:6): [True: 0, False: 0]
3794
3795
0
  return 0;
3796
0
}
3797
3798
int
3799
event_base_foreach_event(struct event_base *base,
3800
    event_base_foreach_event_cb fn, void *arg)
3801
0
{
3802
0
  int r;
3803
0
  if ((!fn) || (!base)) {
  Branch (3803:6): [True: 0, False: 0]
  Branch (3803:15): [True: 0, False: 0]
3804
0
    return -1;
3805
0
  }
3806
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3807
0
  r = event_base_foreach_event_nolock_(base, fn, arg);
3808
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3809
0
  return r;
3810
0
}
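
A minimal sketch of the public iteration API wrapped above (illustrative only, not part of the instrumented event.c; count_one() and count_events() are hypothetical helpers). Returning nonzero from the callback stops the walk early.

#include <event2/event.h>

static int count_one(const struct event_base *base,
    const struct event *ev, void *arg)
{
  (*(int *)arg)++;
  return 0;                  /* keep iterating */
}

static int count_events(struct event_base *base)
{
  int n = 0;
  event_base_foreach_event(base, count_one, &n);
  return n;
}
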
3811
3812
3813
void
3814
event_base_dump_events(struct event_base *base, FILE *output)
3815
0
{
3816
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3817
0
  fprintf(output, "Inserted events:\n");
3818
0
  event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3819
3820
0
  fprintf(output, "Active events:\n");
3821
0
  event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3822
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3823
0
}
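
A one-line usage sketch (illustrative only, not part of the instrumented event.c; debug_dump() is a hypothetical helper):

#include <stdio.h>
#include <event2/event.h>

static void debug_dump(struct event_base *base)
{
  event_base_dump_events(base, stderr);   /* prints inserted and active events */
}
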
3824
3825
void
3826
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3827
0
{
3828
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3829
3830
  /* Activate any non-timer events */
3831
0
  if (!(events & EV_TIMEOUT)) {
  Branch (3831:6): [True: 0, False: 0]
3832
0
    evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3833
0
  } else {
3834
    /* If we want to activate timer events, loop and activate each event with
3835
     * the same fd in both the timeheap and the common timeout lists */
3836
0
    int i;
3837
0
    unsigned u;
3838
0
    struct event *ev;
3839
3840
0
    for (u = 0; u < base->timeheap.n; ++u) {
  Branch (3840:15): [True: 0, False: 0]
3841
0
      ev = base->timeheap.p[u];
3842
0
      if (ev->ev_fd == fd) {
  Branch (3842:8): [True: 0, False: 0]
3843
0
        event_active_nolock_(ev, EV_TIMEOUT, 1);
3844
0
      }
3845
0
    }
3846
3847
0
    for (i = 0; i < base->n_common_timeouts; ++i) {
  Branch (3847:15): [True: 0, False: 0]
3848
0
      struct common_timeout_list *ctl = base->common_timeout_queues[i];
3849
0
      TAILQ_FOREACH(ev, &ctl->events,
3850
0
        ev_timeout_pos.ev_next_with_common_timeout) {
3851
0
        if (ev->ev_fd == fd) {
  Branch (3851:9): [True: 0, False: 0]
3852
0
          event_active_nolock_(ev, EV_TIMEOUT, 1);
3853
0
        }
3854
0
      }
3855
0
    }
3856
0
  }
3857
3858
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3859
0
}
3860
3861
void
3862
event_base_active_by_signal(struct event_base *base, int sig)
3863
0
{
3864
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3865
0
  evmap_signal_active_(base, sig, 1);
3866
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3867
0
}
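
A minimal sketch of these by-fd/by-signal activators (illustrative only, not part of the instrumented event.c; kick_readers() is a hypothetical helper): they fire every matching pending event as if the condition had actually occurred.

#include <event2/event.h>

static void kick_readers(struct event_base *base, evutil_socket_t fd)
{
  /* run every EV_READ event registered on fd, e.g. after data was
   * injected from user space rather than arriving on the socket */
  event_base_active_by_fd(base, fd, EV_READ);
}
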
3868
3869
3870
void
3871
event_base_add_virtual_(struct event_base *base)
3872
0
{
3873
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3874
0
  base->virtual_event_count++;
3875
0
  MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3876
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3877
0
}
3878
3879
void
3880
event_base_del_virtual_(struct event_base *base)
3881
0
{
3882
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3883
0
  EVUTIL_ASSERT(base->virtual_event_count > 0);
3884
0
  base->virtual_event_count--;
3885
0
  if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
  Branch (3885:6): [True: 0, False: 0]
3886
0
    evthread_notify_base(base);
3887
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3888
0
}
3889
3890
static void
3891
event_free_debug_globals_locks(void)
3892
0
{
3893
0
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3894
0
#ifndef EVENT__DISABLE_DEBUG_MODE
3895
0
  if (event_debug_map_lock_ != NULL) {
  Branch (3895:6): [True: 0, False: 0]
3896
0
    EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3897
0
    event_debug_map_lock_ = NULL;
3898
0
    evthreadimpl_disable_lock_debugging_();
3899
0
  }
3900
0
#endif /* EVENT__DISABLE_DEBUG_MODE */
3901
0
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
3902
0
  return;
3903
0
}
3904
3905
static void
3906
event_free_debug_globals(void)
3907
0
{
3908
0
  event_free_debug_globals_locks();
3909
0
}
3910
3911
static void
3912
event_free_evsig_globals(void)
3913
0
{
3914
0
  evsig_free_globals_();
3915
0
}
3916
3917
static void
3918
event_free_evutil_globals(void)
3919
0
{
3920
0
  evutil_free_globals_();
3921
0
}
3922
3923
static void
3924
event_free_globals(void)
3925
0
{
3926
0
  event_free_debug_globals();
3927
0
  event_free_evsig_globals();
3928
0
  event_free_evutil_globals();
3929
0
}
3930
3931
void
3932
libevent_global_shutdown(void)
3933
0
{
3934
0
  event_disable_debug_mode();
3935
0
  event_free_globals();
3936
0
}
3937
3938
#ifndef EVENT__DISABLE_THREAD_SUPPORT
3939
int
3940
event_global_setup_locks_(const int enable_locks)
3941
11.0k
{
3942
11.0k
#ifndef EVENT__DISABLE_DEBUG_MODE
3943
11.0k
  EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3944
11.0k
#endif
3945
11.0k
  if (evsig_global_setup_locks_(enable_locks) < 0)
  Branch (3945:6): [True: 0, False: 11.0k]
3946
0
    return -1;
3947
11.0k
  if (evutil_global_setup_locks_(enable_locks) < 0)
  Branch (3947:6): [True: 0, False: 11.0k]
3948
0
    return -1;
3949
11.0k
  if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
  Branch (3949:6): [True: 0, False: 11.0k]
3950
0
    return -1;
3951
11.0k
  return 0;
3952
11.0k
}
3953
#endif
3954
3955
void
3956
event_base_assert_ok_(struct event_base *base)
3957
0
{
3958
0
  EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3959
0
  event_base_assert_ok_nolock_(base);
3960
0
  EVBASE_RELEASE_LOCK(base, th_base_lock);
3961
0
}
3962
3963
void
3964
event_base_assert_ok_nolock_(struct event_base *base)
3965
0
{
3966
0
  int i;
3967
0
  int count;
3968
3969
  /* First do checks on the per-fd and per-signal lists */
3970
0
  evmap_check_integrity_(base);
3971
3972
  /* Check the heap property */
3973
0
  for (i = 1; i < (int)base->timeheap.n; ++i) {
  Branch (3973:14): [True: 0, False: 0]
3974
0
    int parent = (i - 1) / 2;
3975
0
    struct event *ev, *p_ev;
3976
0
    ev = base->timeheap.p[i];
3977
0
    p_ev = base->timeheap.p[parent];
3978
0
    EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3979
0
    EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3980
0
    EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3981
0
  }
3982
3983
  /* Check that the common timeouts are fine */
3984
0
  for (i = 0; i < base->n_common_timeouts; ++i) {
  Branch (3984:14): [True: 0, False: 0]
3985
0
    struct common_timeout_list *ctl = base->common_timeout_queues[i];
3986
0
    struct event *last=NULL, *ev;
3987
3988
0
    EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3989
3990
0
    TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3991
0
      if (last)
  Branch (3991:8): [True: 0, False: 0]
3992
0
        EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3993
0
      EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3994
0
      EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3995
0
      EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3996
0
      last = ev;
3997
0
    }
3998
0
  }
3999
4000
  /* Check the active queues. */
4001
0
  count = 0;
4002
0
  for (i = 0; i < base->nactivequeues; ++i) {
  Branch (4002:14): [True: 0, False: 0]
4003
0
    struct event_callback *evcb;
4004
0
    EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4005
0
    TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4006
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4007
0
      EVUTIL_ASSERT(evcb->evcb_pri == i);
4008
0
      ++count;
4009
0
    }
4010
0
  }
4011
4012
0
  {
4013
0
    struct event_callback *evcb;
4014
0
    TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4015
0
      EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4016
0
      ++count;
4017
0
    }
4018
0
  }
4019
0
  EVUTIL_ASSERT(count == base->event_count_active);
4020
0
}