LCOV - code coverage report
Current view: top level - ipc/chromium/src/third_party/libevent - event.c (source / functions)
Test: output.info
Date: 2017-07-14 16:53:18

                 Hit    Total    Coverage
Lines:           414     1680      24.6 %
Functions:        40      173      23.1 %

Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
       3             :  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
       4             :  *
       5             :  * Redistribution and use in source and binary forms, with or without
       6             :  * modification, are permitted provided that the following conditions
       7             :  * are met:
       8             :  * 1. Redistributions of source code must retain the above copyright
       9             :  *    notice, this list of conditions and the following disclaimer.
      10             :  * 2. Redistributions in binary form must reproduce the above copyright
      11             :  *    notice, this list of conditions and the following disclaimer in the
      12             :  *    documentation and/or other materials provided with the distribution.
      13             :  * 3. The name of the author may not be used to endorse or promote products
      14             :  *    derived from this software without specific prior written permission.
      15             :  *
      16             :  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
      17             :  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
      18             :  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
      19             :  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
      20             :  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
      21             :  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      22             :  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      23             :  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      24             :  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
      25             :  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      26             :  */
      27             : #include "event2/event-config.h"
      28             : #include "evconfig-private.h"
      29             : 
      30             : #ifdef _WIN32
      31             : #include <winsock2.h>
      32             : #define WIN32_LEAN_AND_MEAN
      33             : #include <windows.h>
      34             : #undef WIN32_LEAN_AND_MEAN
      35             : #endif
      36             : #include <sys/types.h>
      37             : #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
      38             : #include <sys/time.h>
      39             : #endif
      40             : #include <sys/queue.h>
      41             : #ifdef EVENT__HAVE_SYS_SOCKET_H
      42             : #include <sys/socket.h>
      43             : #endif
      44             : #include <stdio.h>
      45             : #include <stdlib.h>
      46             : #ifdef EVENT__HAVE_UNISTD_H
      47             : #include <unistd.h>
      48             : #endif
      49             : #include <ctype.h>
      50             : #include <errno.h>
      51             : #include <signal.h>
      52             : #include <string.h>
      53             : #include <time.h>
      54             : #include <limits.h>
      55             : 
      56             : #include "event2/event.h"
      57             : #include "event2/event_struct.h"
      58             : #include "event2/event_compat.h"
      59             : #include "event-internal.h"
      60             : #include "defer-internal.h"
      61             : #include "evthread-internal.h"
      62             : #include "event2/thread.h"
      63             : #include "event2/util.h"
      64             : #include "log-internal.h"
      65             : #include "evmap-internal.h"
      66             : #include "iocp-internal.h"
      67             : #include "changelist-internal.h"
      68             : #define HT_NO_CACHE_HASH_VALUES
      69             : #include "ht-internal.h"
      70             : #include "util-internal.h"
      71             : 
      72             : 
      73             : #ifdef EVENT__HAVE_WORKING_KQUEUE
      74             : #include "kqueue-internal.h"
      75             : #endif
      76             : 
      77             : #ifdef EVENT__HAVE_EVENT_PORTS
      78             : extern const struct eventop evportops;
      79             : #endif
      80             : #ifdef EVENT__HAVE_SELECT
      81             : extern const struct eventop selectops;
      82             : #endif
      83             : #ifdef EVENT__HAVE_POLL
      84             : extern const struct eventop pollops;
      85             : #endif
      86             : #ifdef EVENT__HAVE_EPOLL
      87             : extern const struct eventop epollops;
      88             : #endif
      89             : #ifdef EVENT__HAVE_WORKING_KQUEUE
      90             : extern const struct eventop kqops;
      91             : #endif
      92             : #ifdef EVENT__HAVE_DEVPOLL
      93             : extern const struct eventop devpollops;
      94             : #endif
      95             : #ifdef _WIN32
      96             : extern const struct eventop win32ops;
      97             : #endif
      98             : 
      99             : /* Array of backends in order of preference. */
     100             : static const struct eventop *eventops[] = {
     101             : #ifdef EVENT__HAVE_EVENT_PORTS
     102             :         &evportops,
     103             : #endif
     104             : #ifdef EVENT__HAVE_WORKING_KQUEUE
     105             :         &kqops,
     106             : #endif
     107             : #ifdef EVENT__HAVE_EPOLL
     108             :         &epollops,
     109             : #endif
     110             : #ifdef EVENT__HAVE_DEVPOLL
     111             :         &devpollops,
     112             : #endif
     113             : #ifdef EVENT__HAVE_POLL
     114             :         &pollops,
     115             : #endif
     116             : #ifdef EVENT__HAVE_SELECT
     117             :         &selectops,
     118             : #endif
     119             : #ifdef _WIN32
     120             :         &win32ops,
     121             : #endif
     122             :         NULL
     123             : };
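
The array above is walked in order by event_base_new_with_config(), so the first compiled-in backend that survives the config and environment checks wins. A minimal sketch of inspecting that choice through the public API (event_get_supported_methods() and event_base_get_method() are libevent's own; the main() harness is illustrative):

    #include <stdio.h>
    #include <event2/event.h>

    int main(void)
    {
        int i;
        /* NULL-terminated list of backends compiled into this libevent. */
        const char **methods = event_get_supported_methods();
        struct event_base *base = event_base_new();

        if (!base)
            return 1;
        for (i = 0; methods[i] != NULL; ++i)
            printf("available: %s\n", methods[i]);
        /* Name of the backend actually selected for this base. */
        printf("chosen:    %s\n", event_base_get_method(base));
        event_base_free(base);
        return 0;
    }
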
     124             : 
     125             : /* Global state; deprecated */
     126             : struct event_base *event_global_current_base_ = NULL;
     127             : #define current_base event_global_current_base_
     128             : 
     129             : /* Global state */
     130             : 
     131             : static void *event_self_cbarg_ptr_ = NULL;
     132             : 
     133             : /* Prototypes */
     134             : static void     event_queue_insert_active(struct event_base *, struct event_callback *);
     135             : static void     event_queue_insert_active_later(struct event_base *, struct event_callback *);
     136             : static void     event_queue_insert_timeout(struct event_base *, struct event *);
     137             : static void     event_queue_insert_inserted(struct event_base *, struct event *);
     138             : static void     event_queue_remove_active(struct event_base *, struct event_callback *);
     139             : static void     event_queue_remove_active_later(struct event_base *, struct event_callback *);
     140             : static void     event_queue_remove_timeout(struct event_base *, struct event *);
     141             : static void     event_queue_remove_inserted(struct event_base *, struct event *);
     142             : static void event_queue_make_later_events_active(struct event_base *base);
     143             : 
     144             : static int evthread_make_base_notifiable_nolock_(struct event_base *base);
     145             : static int event_del_(struct event *ev, int blocking);
     146             : 
     147             : #ifdef USE_REINSERT_TIMEOUT
     148             : /* This code seems buggy; only turn it on if we find out what the trouble is. */
     149             : static void     event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
     150             : #endif
     151             : 
     152             : static int      event_haveevents(struct event_base *);
     153             : 
     154             : static int      event_process_active(struct event_base *);
     155             : 
     156             : static int      timeout_next(struct event_base *, struct timeval **);
     157             : static void     timeout_process(struct event_base *);
     158             : 
     159             : static inline void      event_signal_closure(struct event_base *, struct event *ev);
     160             : static inline void      event_persist_closure(struct event_base *, struct event *ev);
     161             : 
     162             : static int      evthread_notify_base(struct event_base *base);
     163             : 
     164             : static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
     165             :     struct event *ev);
     166             : 
     167             : #ifndef EVENT__DISABLE_DEBUG_MODE
      168             : /* These functions implement a hashtable recording which 'struct event *'
      169             :  * structures have been set up or added.  We don't want to trust the struct
     170             :  * event itself, since we're trying to work through cases where an event gets
     171             :  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
     172             :  */
     173             : 
     174             : struct event_debug_entry {
     175             :         HT_ENTRY(event_debug_entry) node;
     176             :         const struct event *ptr;
     177             :         unsigned added : 1;
     178             : };
     179             : 
     180             : static inline unsigned
     181           0 : hash_debug_entry(const struct event_debug_entry *e)
     182             : {
     183             :         /* We need to do this silliness to convince compilers that we
     184             :          * honestly mean to cast e->ptr to an integer, and discard any
     185             :          * part of it that doesn't fit in an unsigned.
     186             :          */
     187           0 :         unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
     188             :         /* Our hashtable implementation is pretty sensitive to low bits,
     189             :          * and every struct event is over 64 bytes in size, so we can
     190             :          * just say >>6. */
     191           0 :         return (u >> 6);
     192             : }
     193             : 
     194             : static inline int
     195           0 : eq_debug_entry(const struct event_debug_entry *a,
     196             :     const struct event_debug_entry *b)
     197             : {
     198           0 :         return a->ptr == b->ptr;
     199             : }
     200             : 
     201             : int event_debug_mode_on_ = 0;
     202             : 
     203             : 
     204             : #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
     205             : /**
      206             :  * @brief Debug-mode variable, set by any function that creates a structure
      207             :  *        which must be shared across threads (if thread support is enabled).
      208             :  *
      209             :  *        When and if evthreads are initialized, this variable is evaluated;
      210             :  *        if it is set to something other than zero, the evthread setup
      211             :  *        functions were called out of order.
     212             :  *
     213             :  *        See: "Locks and threading" in the documentation.
     214             :  */
     215             : int event_debug_created_threadable_ctx_ = 0;
     216             : #endif
     217             : 
     218             : /* Set if it's too late to enable event_debug_mode. */
     219             : static int event_debug_mode_too_late = 0;
     220             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
     221             : static void *event_debug_map_lock_ = NULL;
     222             : #endif
     223             : static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
     224             :         HT_INITIALIZER();
     225             : 
     226           0 : HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
     227             :     eq_debug_entry)
     228           0 : HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
     229             :     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
     230             : 
      231             : /* Macro: record that ev is now set up (that is, ready for an add) */
     232             : #define event_debug_note_setup_(ev) do {                                \
     233             :         if (event_debug_mode_on_) {                                     \
     234             :                 struct event_debug_entry *dent,find;                    \
     235             :                 find.ptr = (ev);                                        \
     236             :                 EVLOCK_LOCK(event_debug_map_lock_, 0);                  \
     237             :                 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
     238             :                 if (dent) {                                             \
     239             :                         dent->added = 0;                             \
     240             :                 } else {                                                \
     241             :                         dent = mm_malloc(sizeof(*dent));                \
     242             :                         if (!dent)                                      \
     243             :                                 event_err(1,                            \
     244             :                                     "Out of memory in debugging code");       \
     245             :                         dent->ptr = (ev);                            \
     246             :                         dent->added = 0;                             \
     247             :                         HT_INSERT(event_debug_map, &global_debug_map, dent); \
     248             :                 }                                                       \
     249             :                 EVLOCK_UNLOCK(event_debug_map_lock_, 0);                \
     250             :         }                                                               \
     251             :         event_debug_mode_too_late = 1;                                  \
     252             :         } while (0)
      253             : /* Macro: record that ev is no longer set up */
     254             : #define event_debug_note_teardown_(ev) do {                             \
     255             :         if (event_debug_mode_on_) {                                     \
     256             :                 struct event_debug_entry *dent,find;                    \
     257             :                 find.ptr = (ev);                                        \
     258             :                 EVLOCK_LOCK(event_debug_map_lock_, 0);                  \
     259             :                 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
     260             :                 if (dent)                                               \
     261             :                         mm_free(dent);                                  \
     262             :                 EVLOCK_UNLOCK(event_debug_map_lock_, 0);                \
     263             :         }                                                               \
     264             :         event_debug_mode_too_late = 1;                                  \
     265             :         } while (0)
     266             : /* Macro: record that ev is now added */
     267             : #define event_debug_note_add_(ev)       do {                            \
     268             :         if (event_debug_mode_on_) {                                     \
     269             :                 struct event_debug_entry *dent,find;                    \
     270             :                 find.ptr = (ev);                                        \
     271             :                 EVLOCK_LOCK(event_debug_map_lock_, 0);                  \
     272             :                 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
     273             :                 if (dent) {                                             \
     274             :                         dent->added = 1;                             \
     275             :                 } else {                                                \
     276             :                         event_errx(EVENT_ERR_ABORT_,                    \
     277             :                             "%s: noting an add on a non-setup event %p" \
     278             :                             " (events: 0x%x, fd: "EV_SOCK_FMT         \
     279             :                             ", flags: 0x%x)",                         \
     280             :                             __func__, (ev), (ev)->ev_events,         \
     281             :                             EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);    \
     282             :                 }                                                       \
     283             :                 EVLOCK_UNLOCK(event_debug_map_lock_, 0);                \
     284             :         }                                                               \
     285             :         event_debug_mode_too_late = 1;                                  \
     286             :         } while (0)
     287             : /* Macro: record that ev is no longer added */
     288             : #define event_debug_note_del_(ev) do {                                  \
     289             :         if (event_debug_mode_on_) {                                     \
     290             :                 struct event_debug_entry *dent,find;                    \
     291             :                 find.ptr = (ev);                                        \
     292             :                 EVLOCK_LOCK(event_debug_map_lock_, 0);                  \
     293             :                 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
     294             :                 if (dent) {                                             \
     295             :                         dent->added = 0;                             \
     296             :                 } else {                                                \
     297             :                         event_errx(EVENT_ERR_ABORT_,                    \
     298             :                             "%s: noting a del on a non-setup event %p"        \
     299             :                             " (events: 0x%x, fd: "EV_SOCK_FMT         \
     300             :                             ", flags: 0x%x)",                         \
     301             :                             __func__, (ev), (ev)->ev_events,         \
     302             :                             EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);    \
     303             :                 }                                                       \
     304             :                 EVLOCK_UNLOCK(event_debug_map_lock_, 0);                \
     305             :         }                                                               \
     306             :         event_debug_mode_too_late = 1;                                  \
     307             :         } while (0)
      308             : /* Macro: assert that ev is set up (i.e., okay to add or inspect) */
     309             : #define event_debug_assert_is_setup_(ev) do {                           \
     310             :         if (event_debug_mode_on_) {                                     \
     311             :                 struct event_debug_entry *dent,find;                    \
     312             :                 find.ptr = (ev);                                        \
     313             :                 EVLOCK_LOCK(event_debug_map_lock_, 0);                  \
     314             :                 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
     315             :                 if (!dent) {                                            \
     316             :                         event_errx(EVENT_ERR_ABORT_,                    \
     317             :                             "%s called on a non-initialized event %p" \
     318             :                             " (events: 0x%x, fd: "EV_SOCK_FMT\
     319             :                             ", flags: 0x%x)",                         \
     320             :                             __func__, (ev), (ev)->ev_events,         \
     321             :                             EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);    \
     322             :                 }                                                       \
     323             :                 EVLOCK_UNLOCK(event_debug_map_lock_, 0);                \
     324             :         }                                                               \
     325             :         } while (0)
     326             : /* Macro: assert that ev is not added (i.e., okay to tear down or set
     327             :  * up again) */
     328             : #define event_debug_assert_not_added_(ev) do {                          \
     329             :         if (event_debug_mode_on_) {                                     \
     330             :                 struct event_debug_entry *dent,find;                    \
     331             :                 find.ptr = (ev);                                        \
     332             :                 EVLOCK_LOCK(event_debug_map_lock_, 0);                  \
     333             :                 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
     334             :                 if (dent && dent->added) {                           \
     335             :                         event_errx(EVENT_ERR_ABORT_,                    \
     336             :                             "%s called on an already added event %p"  \
     337             :                             " (events: 0x%x, fd: "EV_SOCK_FMT", "   \
     338             :                             "flags: 0x%x)",                           \
     339             :                             __func__, (ev), (ev)->ev_events,         \
     340             :                             EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);    \
     341             :                 }                                                       \
     342             :                 EVLOCK_UNLOCK(event_debug_map_lock_, 0);                \
     343             :         }                                                               \
     344             :         } while (0)
     345             : #else
     346             : #define event_debug_note_setup_(ev) \
     347             :         ((void)0)
     348             : #define event_debug_note_teardown_(ev) \
     349             :         ((void)0)
     350             : #define event_debug_note_add_(ev) \
     351             :         ((void)0)
     352             : #define event_debug_note_del_(ev) \
     353             :         ((void)0)
     354             : #define event_debug_assert_is_setup_(ev) \
     355             :         ((void)0)
     356             : #define event_debug_assert_not_added_(ev) \
     357             :         ((void)0)
     358             : #endif
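
The note/assert macros above are no-ops until event_enable_debug_mode() flips event_debug_mode_on_; past that point, adds and deletes on non-initialized events abort with a diagnostic. A sketch of enabling it (it must run before any base or event is created, which is what event_debug_mode_too_late enforces):

    #include <event2/event.h>

    int main(void)
    {
        /* Must precede any event_base or event creation, or libevent
         * fails fatally with "must be called *before* ...". */
        event_enable_debug_mode();

        struct event_base *base = event_base_new();
        /* ... assign and add events; misuse such as adding an event
         * that was never set up now aborts with a diagnostic ... */
        event_base_free(base);
        return 0;
    }
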
     359             : 
     360             : #define EVENT_BASE_ASSERT_LOCKED(base)          \
     361             :         EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
     362             : 
     363             : /* How often (in seconds) do we check for changes in wall clock time relative
     364             :  * to monotonic time?  Set this to -1 for 'never.' */
     365             : #define CLOCK_SYNC_INTERVAL 5
     366             : 
     367             : /** Set 'tp' to the current time according to 'base'.  We must hold the lock
     368             :  * on 'base'.  If there is a cached time, return it.  Otherwise, use
     369             :  * clock_gettime or gettimeofday as appropriate to find out the right time.
     370             :  * Return 0 on success, -1 on failure.
     371             :  */
     372             : static int
     373         654 : gettime(struct event_base *base, struct timeval *tp)
     374             : {
     375         654 :         EVENT_BASE_ASSERT_LOCKED(base);
     376             : 
     377         654 :         if (base->tv_cache.tv_sec) {
     378           0 :                 *tp = base->tv_cache;
     379           0 :                 return (0);
     380             :         }
     381             : 
     382         654 :         if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
     383           0 :                 return -1;
     384             :         }
     385             : 
     386        1308 :         if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
     387         654 :             < tp->tv_sec) {
     388             :                 struct timeval tv;
     389           6 :                 evutil_gettimeofday(&tv,NULL);
     390           6 :                 evutil_timersub(&tv, tp, &base->tv_clock_diff);
     391           6 :                 base->last_updated_clock_diff = tp->tv_sec;
     392             :         }
     393             : 
     394         654 :         return 0;
     395             : }
     396             : 
     397             : int
     398           0 : event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
     399             : {
     400             :         int r;
     401           0 :         if (!base) {
     402           0 :                 base = current_base;
     403           0 :                 if (!current_base)
     404           0 :                         return evutil_gettimeofday(tv, NULL);
     405             :         }
     406             : 
     407           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
     408           0 :         if (base->tv_cache.tv_sec == 0) {
     409           0 :                 r = evutil_gettimeofday(tv, NULL);
     410             :         } else {
     411           0 :                 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
     412           0 :                 r = 0;
     413             :         }
     414           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
     415           0 :         return r;
     416             : }
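
Inside a running loop the cache makes this call much cheaper than one syscall per callback. A sketch of typical use from an event callback (the callback itself is illustrative):

    #include <event2/event.h>

    static void
    timing_cb(evutil_socket_t fd, short what, void *arg)
    {
        struct event_base *base = arg;
        struct timeval now;

        /* Returns the time cached at the top of this loop iteration,
         * shifted back to the wall clock by tv_clock_diff; falls back
         * to a real evutil_gettimeofday() when no cache is present. */
        event_base_gettimeofday_cached(base, &now);
    }
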
     417             : 
     418             : /** Make 'base' have no current cached time. */
     419             : static inline void
     420        1959 : clear_time_cache(struct event_base *base)
     421             : {
     422        1959 :         base->tv_cache.tv_sec = 0;
     423        1959 : }
     424             : 
     425             : /** Replace the cached time in 'base' with the current time. */
     426             : static inline void
     427         651 : update_time_cache(struct event_base *base)
     428             : {
     429         651 :         base->tv_cache.tv_sec = 0;
     430         651 :         if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
     431         651 :             gettime(base, &base->tv_cache);
     432         651 : }
     433             : 
     434             : int
     435           0 : event_base_update_cache_time(struct event_base *base)
     436             : {
     437             : 
     438           0 :         if (!base) {
     439           0 :                 base = current_base;
     440           0 :                 if (!current_base)
     441           0 :                         return -1;
     442             :         }
     443             : 
     444           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
     445           0 :         if (base->running_loop)
     446           0 :                 update_time_cache(base);
     447           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
     448           0 :         return 0;
     449             : }
     450             : 
     451             : static inline struct event *
     452         655 : event_callback_to_event(struct event_callback *evcb)
     453             : {
     454         655 :         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
     455         655 :         return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
     456             : }
     457             : 
     458             : static inline struct event_callback *
     459         704 : event_to_event_callback(struct event *ev)
     460             : {
     461         704 :         return &ev->ev_evcallback;
     462             : }
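
EVUTIL_UPCAST recovers the enclosing struct event from a pointer to its embedded ev_evcallback member. As a sketch, the generic container-of idiom it implements (UPCAST here is a stand-in name, not a libevent macro):

    #include <stddef.h>

    /* Recover a pointer to the enclosing structure from a pointer to
     * one of its members by subtracting the member's offset. */
    #define UPCAST(ptr, type, field) \
        ((type *)(((char *)(ptr)) - offsetof(type, field)))
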
     463             : 
     464             : struct event_base *
     465           0 : event_init(void)
     466             : {
     467           0 :         struct event_base *base = event_base_new_with_config(NULL);
     468             : 
     469           0 :         if (base == NULL) {
     470           0 :                 event_errx(1, "%s: Unable to construct event_base", __func__);
     471             :                 return NULL;
     472             :         }
     473             : 
     474           0 :         current_base = base;
     475             : 
     476           0 :         return (base);
     477             : }
     478             : 
     479             : struct event_base *
     480           3 : event_base_new(void)
     481             : {
     482           3 :         struct event_base *base = NULL;
     483           3 :         struct event_config *cfg = event_config_new();
     484           3 :         if (cfg) {
     485           3 :                 base = event_base_new_with_config(cfg);
     486           3 :                 event_config_free(cfg);
     487             :         }
     488           3 :         return base;
     489             : }
     490             : 
     491             : /** Return true iff 'method' is the name of a method that 'cfg' tells us to
     492             :  * avoid. */
     493             : static int
     494           3 : event_config_is_avoided_method(const struct event_config *cfg,
     495             :     const char *method)
     496             : {
     497             :         struct event_config_entry *entry;
     498             : 
     499           3 :         TAILQ_FOREACH(entry, &cfg->entries, next) {
     500           0 :                 if (entry->avoid_method != NULL &&
     501           0 :                     strcmp(entry->avoid_method, method) == 0)
     502           0 :                         return (1);
     503             :         }
     504             : 
     505           3 :         return (0);
     506             : }
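
The entries this helper scans are created by the public event_config_avoid_method(). A sketch that steers base creation away from a named backend ("epoll" is just an example; base_without_epoll is an illustrative helper):

    #include <event2/event.h>

    struct event_base *
    base_without_epoll(void)
    {
        struct event_base *base = NULL;
        struct event_config *cfg = event_config_new();

        if (cfg) {
            /* Skip this backend; the next one in eventops[] is tried. */
            event_config_avoid_method(cfg, "epoll");
            base = event_base_new_with_config(cfg);
            event_config_free(cfg);
        }
        return base;
    }
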
     507             : 
     508             : /** Return true iff 'method' is disabled according to the environment. */
     509             : static int
     510           3 : event_is_method_disabled(const char *name)
     511             : {
     512             :         char environment[64];
     513             :         int i;
     514             : 
     515           3 :         evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
     516          18 :         for (i = 8; environment[i] != '\0'; ++i)
     517          15 :                 environment[i] = EVUTIL_TOUPPER_(environment[i]);
     518             :         /* Note that evutil_getenv_() ignores the environment entirely if
     519             :          * we're setuid */
     520           3 :         return (evutil_getenv_(environment) != NULL);
     521             : }
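
The same effect is available without touching code, since this helper turns a backend name into an EVENT_NO<NAME> environment variable. A sketch, assuming a POSIX environment:

    #include <stdlib.h>
    #include <event2/event.h>

    int main(void)
    {
        /* event_is_method_disabled("epoll") checks EVENT_NOEPOLL, so
         * this must be set before the base is created. */
        setenv("EVENT_NOEPOLL", "1", 1);

        struct event_base *base = event_base_new();
        /* base now uses the next usable backend in the preference
         * array; EVENT_BASE_FLAG_IGNORE_ENV suppresses this check. */
        event_base_free(base);
        return 0;
    }
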
     522             : 
     523             : int
     524           0 : event_base_get_features(const struct event_base *base)
     525             : {
     526           0 :         return base->evsel->features;
     527             : }
     528             : 
     529             : void
     530           0 : event_enable_debug_mode(void)
     531             : {
     532             : #ifndef EVENT__DISABLE_DEBUG_MODE
     533           0 :         if (event_debug_mode_on_)
     534           0 :                 event_errx(1, "%s was called twice!", __func__);
     535           0 :         if (event_debug_mode_too_late)
     536           0 :                 event_errx(1, "%s must be called *before* creating any events "
     537             :                     "or event_bases",__func__);
     538             : 
     539           0 :         event_debug_mode_on_ = 1;
     540             : 
     541           0 :         HT_INIT(event_debug_map, &global_debug_map);
     542             : #endif
     543           0 : }
     544             : 
     545             : void
     546           0 : event_disable_debug_mode(void)
     547             : {
     548             : #ifndef EVENT__DISABLE_DEBUG_MODE
     549             :         struct event_debug_entry **ent, *victim;
     550             : 
     551           0 :         EVLOCK_LOCK(event_debug_map_lock_, 0);
     552           0 :         for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
     553           0 :                 victim = *ent;
     554           0 :                 ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
     555           0 :                 mm_free(victim);
     556             :         }
     557           0 :         HT_CLEAR(event_debug_map, &global_debug_map);
      558           0 :         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
      559             : 
      560           0 :         event_debug_mode_on_ = 0;
     561             : #endif
     562           0 : }
     563             : 
     564             : struct event_base *
     565           3 : event_base_new_with_config(const struct event_config *cfg)
     566             : {
     567             :         int i;
     568             :         struct event_base *base;
     569             :         int should_check_environment;
     570             : 
     571             : #ifndef EVENT__DISABLE_DEBUG_MODE
     572           3 :         event_debug_mode_too_late = 1;
     573             : #endif
     574             : 
     575           3 :         if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
     576           0 :                 event_warn("%s: calloc", __func__);
     577           0 :                 return NULL;
     578             :         }
     579             : 
     580           3 :         if (cfg)
     581           3 :                 base->flags = cfg->flags;
     582             : 
     583           3 :         should_check_environment =
     584           3 :             !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
     585             : 
     586             :         {
     587             :                 struct timeval tmp;
     588           3 :                 int precise_time =
     589           3 :                     cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
     590             :                 int flags;
     591           3 :                 if (should_check_environment && !precise_time) {
     592           3 :                         precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
     593           3 :                         base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
     594             :                 }
     595           3 :                 flags = precise_time ? EV_MONOT_PRECISE : 0;
     596           3 :                 evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
     597             : 
     598           3 :                 gettime(base, &tmp);
     599             :         }
     600             : 
     601           3 :         min_heap_ctor_(&base->timeheap);
     602             : 
     603           3 :         base->sig.ev_signal_pair[0] = -1;
     604           3 :         base->sig.ev_signal_pair[1] = -1;
     605           3 :         base->th_notify_fd[0] = -1;
     606           3 :         base->th_notify_fd[1] = -1;
     607             : 
     608           3 :         TAILQ_INIT(&base->active_later_queue);
     609             : 
     610           3 :         evmap_io_initmap_(&base->io);
     611           3 :         evmap_signal_initmap_(&base->sigmap);
     612           3 :         event_changelist_init_(&base->changelist);
     613             : 
     614           3 :         base->evbase = NULL;
     615             : 
     616           3 :         if (cfg) {
     617           3 :                 memcpy(&base->max_dispatch_time,
     618           3 :                     &cfg->max_dispatch_interval, sizeof(struct timeval));
     619           3 :                 base->limit_callbacks_after_prio =
     620           3 :                     cfg->limit_callbacks_after_prio;
     621             :         } else {
     622           0 :                 base->max_dispatch_time.tv_sec = -1;
     623           0 :                 base->limit_callbacks_after_prio = 1;
     624             :         }
     625           3 :         if (cfg && cfg->max_dispatch_callbacks >= 0) {
     626           3 :                 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
     627             :         } else {
     628           0 :                 base->max_dispatch_callbacks = INT_MAX;
     629             :         }
     630           6 :         if (base->max_dispatch_callbacks == INT_MAX &&
     631           3 :             base->max_dispatch_time.tv_sec == -1)
     632           3 :                 base->limit_callbacks_after_prio = INT_MAX;
     633             : 
     634           6 :         for (i = 0; eventops[i] && !base->evbase; i++) {
     635           3 :                 if (cfg != NULL) {
     636             :                         /* determine if this backend should be avoided */
     637           3 :                         if (event_config_is_avoided_method(cfg,
     638           3 :                                 eventops[i]->name))
     639           0 :                                 continue;
     640           6 :                         if ((eventops[i]->features & cfg->require_features)
     641           3 :                             != cfg->require_features)
     642           0 :                                 continue;
     643             :                 }
     644             : 
     645             :                 /* also obey the environment variables */
     646           6 :                 if (should_check_environment &&
     647           3 :                     event_is_method_disabled(eventops[i]->name))
     648           0 :                         continue;
     649             : 
     650           3 :                 base->evsel = eventops[i];
     651             : 
     652           3 :                 base->evbase = base->evsel->init(base);
     653             :         }
     654             : 
     655           3 :         if (base->evbase == NULL) {
     656           0 :                 event_warnx("%s: no event mechanism available",
     657             :                     __func__);
     658           0 :                 base->evsel = NULL;
     659           0 :                 event_base_free(base);
     660           0 :                 return NULL;
     661             :         }
     662             : 
     663           3 :         if (evutil_getenv_("EVENT_SHOW_METHOD"))
     664           0 :                 event_msgx("libevent using: %s", base->evsel->name);
     665             : 
     666             :         /* allocate a single active event queue */
     667           3 :         if (event_base_priority_init(base, 1) < 0) {
     668           0 :                 event_base_free(base);
     669           0 :                 return NULL;
     670             :         }
     671             : 
     672             :         /* prepare for threading */
     673             : 
     674             : #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
     675           3 :         event_debug_created_threadable_ctx_ = 1;
     676             : #endif
     677             : 
     678             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
     679           3 :         if (EVTHREAD_LOCKING_ENABLED() &&
     680           0 :             (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
     681             :                 int r;
     682           0 :                 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
     683           0 :                 EVTHREAD_ALLOC_COND(base->current_event_cond);
     684           0 :                 r = evthread_make_base_notifiable(base);
     685           0 :                 if (r<0) {
     686           0 :                         event_warnx("%s: Unable to make base notifiable.", __func__);
     687           0 :                         event_base_free(base);
     688           0 :                         return NULL;
     689             :                 }
     690             :         }
     691             : #endif
     692             : 
     693             : #ifdef _WIN32
     694             :         if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
     695             :                 event_base_start_iocp_(base, cfg->n_cpus_hint);
     696             : #endif
     697             : 
     698           3 :         return (base);
     699             : }
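
Putting the config knobs together: a sketch that mirrors the feature and flag checks above, requiring an O(1) backend and the precise timer (make_tuned_base is an illustrative name; the APIs are libevent's own):

    #include <event2/event.h>

    struct event_base *
    make_tuned_base(void)
    {
        struct event_base *base = NULL;
        struct event_config *cfg = event_config_new();

        if (cfg) {
            /* Only accept backends that can report one active event
             * among many in O(1) time (e.g. epoll, kqueue). */
            event_config_require_features(cfg, EV_FEATURE_O1);
            /* Ask for the more precise (and costlier) monotonic clock. */
            event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
            /* NULL if no compiled-in backend satisfies the features. */
            base = event_base_new_with_config(cfg);
            event_config_free(cfg);
        }
        return base;
    }
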
     700             : 
     701             : int
     702           0 : event_base_start_iocp_(struct event_base *base, int n_cpus)
     703             : {
     704             : #ifdef _WIN32
     705             :         if (base->iocp)
     706             :                 return 0;
     707             :         base->iocp = event_iocp_port_launch_(n_cpus);
     708             :         if (!base->iocp) {
     709             :                 event_warnx("%s: Couldn't launch IOCP", __func__);
     710             :                 return -1;
     711             :         }
     712             :         return 0;
     713             : #else
     714           0 :         return -1;
     715             : #endif
     716             : }
     717             : 
     718             : void
     719           0 : event_base_stop_iocp_(struct event_base *base)
     720             : {
     721             : #ifdef _WIN32
     722             :         int rv;
     723             : 
     724             :         if (!base->iocp)
     725             :                 return;
     726             :         rv = event_iocp_shutdown_(base->iocp, -1);
     727             :         EVUTIL_ASSERT(rv >= 0);
     728             :         base->iocp = NULL;
     729             : #endif
     730           0 : }
     731             : 
     732             : static int
     733           0 : event_base_cancel_single_callback_(struct event_base *base,
     734             :     struct event_callback *evcb,
     735             :     int run_finalizers)
     736             : {
     737           0 :         int result = 0;
     738             : 
     739           0 :         if (evcb->evcb_flags & EVLIST_INIT) {
     740           0 :                 struct event *ev = event_callback_to_event(evcb);
     741           0 :                 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
     742           0 :                         event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
     743           0 :                         result = 1;
     744             :                 }
     745             :         } else {
     746           0 :                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
     747           0 :                 event_callback_cancel_nolock_(base, evcb, 1);
     748           0 :                 EVBASE_RELEASE_LOCK(base, th_base_lock);
     749           0 :                 result = 1;
     750             :         }
     751             : 
     752           0 :         if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
     753           0 :                 switch (evcb->evcb_closure) {
     754             :                 case EV_CLOSURE_EVENT_FINALIZE:
     755             :                 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
     756           0 :                         struct event *ev = event_callback_to_event(evcb);
     757           0 :                         ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
     758           0 :                         if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
     759           0 :                                 mm_free(ev);
     760           0 :                         break;
     761             :                 }
     762             :                 case EV_CLOSURE_CB_FINALIZE:
     763           0 :                         evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
     764           0 :                         break;
     765             :                 default:
     766           0 :                         break;
     767             :                 }
     768             :         }
     769           0 :         return result;
     770             : }
     771             : 
     772           0 : static int event_base_free_queues_(struct event_base *base, int run_finalizers)
     773             : {
     774           0 :         int deleted = 0, i;
     775             : 
     776           0 :         for (i = 0; i < base->nactivequeues; ++i) {
     777             :                 struct event_callback *evcb, *next;
     778           0 :                 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
     779           0 :                         next = TAILQ_NEXT(evcb, evcb_active_next);
     780           0 :                         deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
     781           0 :                         evcb = next;
     782             :                 }
     783             :         }
     784             : 
     785             :         {
     786             :                 struct event_callback *evcb;
     787           0 :                 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
     788           0 :                         deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
     789             :                 }
     790             :         }
     791             : 
     792           0 :         return deleted;
     793             : }
     794             : 
     795             : static void
     796           0 : event_base_free_(struct event_base *base, int run_finalizers)
     797             : {
     798           0 :         int i, n_deleted=0;
     799             :         struct event *ev;
     800             :         /* XXXX grab the lock? If there is contention when one thread frees
     801             :          * the base, then the contending thread will be very sad soon. */
     802             : 
     803             :         /* event_base_free(NULL) is how to free the current_base if we
     804             :          * made it with event_init and forgot to hold a reference to it. */
     805           0 :         if (base == NULL && current_base)
     806           0 :                 base = current_base;
     807             :         /* Don't actually free NULL. */
     808           0 :         if (base == NULL) {
     809           0 :                 event_warnx("%s: no base to free", __func__);
     810           0 :                 return;
     811             :         }
     812             :         /* XXX(niels) - check for internal events first */
     813             : 
     814             : #ifdef _WIN32
     815             :         event_base_stop_iocp_(base);
     816             : #endif
     817             : 
     818             :         /* threading fds if we have them */
     819           0 :         if (base->th_notify_fd[0] != -1) {
     820           0 :                 event_del(&base->th_notify);
     821           0 :                 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
     822           0 :                 if (base->th_notify_fd[1] != -1)
     823           0 :                         EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
     824           0 :                 base->th_notify_fd[0] = -1;
     825           0 :                 base->th_notify_fd[1] = -1;
     826           0 :                 event_debug_unassign(&base->th_notify);
     827             :         }
     828             : 
     829             :         /* Delete all non-internal events. */
     830           0 :         evmap_delete_all_(base);
     831             : 
     832           0 :         while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
     833           0 :                 event_del(ev);
     834           0 :                 ++n_deleted;
     835             :         }
     836           0 :         for (i = 0; i < base->n_common_timeouts; ++i) {
     837           0 :                 struct common_timeout_list *ctl =
     838           0 :                     base->common_timeout_queues[i];
     839           0 :                 event_del(&ctl->timeout_event); /* Internal; doesn't count */
     840           0 :                 event_debug_unassign(&ctl->timeout_event);
     841           0 :                 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
     842           0 :                         struct event *next = TAILQ_NEXT(ev,
     843             :                             ev_timeout_pos.ev_next_with_common_timeout);
     844           0 :                         if (!(ev->ev_flags & EVLIST_INTERNAL)) {
     845           0 :                                 event_del(ev);
     846           0 :                                 ++n_deleted;
     847             :                         }
     848           0 :                         ev = next;
     849             :                 }
     850           0 :                 mm_free(ctl);
     851             :         }
     852           0 :         if (base->common_timeout_queues)
     853           0 :                 mm_free(base->common_timeout_queues);
     854             : 
     855           0 :         for (;;) {
      856             :                 /* A finalizer can register yet another finalizer from within
      857             :                  * itself, and if that finalizer lands in active_later_queue it
      858             :                  * can be moved to activequeues, leaving events in
      859             :                  * activequeues after this function returns, which is not what we want
      860             :                  * (we even have an assertion for this).
      861             :                  *
      862             :                  * A simple case is a bufferevent with an underlying one (i.e. filters).
     863             :                  */
     864           0 :                 int i = event_base_free_queues_(base, run_finalizers);
     865           0 :                 if (!i) {
     866           0 :                         break;
     867             :                 }
     868           0 :                 n_deleted += i;
     869             :         }
     870             : 
     871           0 :         if (n_deleted)
     872           0 :                 event_debug(("%s: %d events were still set in base",
     873             :                         __func__, n_deleted));
     874             : 
     875           0 :         while (LIST_FIRST(&base->once_events)) {
     876           0 :                 struct event_once *eonce = LIST_FIRST(&base->once_events);
     877           0 :                 LIST_REMOVE(eonce, next_once);
     878           0 :                 mm_free(eonce);
     879             :         }
     880             : 
     881           0 :         if (base->evsel != NULL && base->evsel->dealloc != NULL)
     882           0 :                 base->evsel->dealloc(base);
     883             : 
     884           0 :         for (i = 0; i < base->nactivequeues; ++i)
     885           0 :                 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
     886             : 
     887           0 :         EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
     888           0 :         min_heap_dtor_(&base->timeheap);
     889             : 
     890           0 :         mm_free(base->activequeues);
     891             : 
     892           0 :         evmap_io_clear_(&base->io);
     893           0 :         evmap_signal_clear_(&base->sigmap);
     894           0 :         event_changelist_freemem_(&base->changelist);
     895             : 
     896           0 :         EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
     897           0 :         EVTHREAD_FREE_COND(base->current_event_cond);
     898             : 
     899             :         /* If we're freeing current_base, there won't be a current_base. */
     900           0 :         if (base == current_base)
     901           0 :                 current_base = NULL;
     902           0 :         mm_free(base);
     903             : }
     904             : 
     905             : void
     906           0 : event_base_free_nofinalize(struct event_base *base)
     907             : {
     908           0 :         event_base_free_(base, 0);
     909           0 : }
     910             : 
     911             : void
     912           0 : event_base_free(struct event_base *base)
     913             : {
     914           0 :         event_base_free_(base, 1);
     915           0 : }
     916             : 
     917             : /* Fake eventop; used to disable the backend temporarily inside event_reinit
     918             :  * so that we can call event_del() on an event without telling the backend.
     919             :  */
     920             : static int
     921           0 : nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
     922             :     short events, void *fdinfo)
     923             : {
     924           0 :         return 0;
     925             : }
     926             : const struct eventop nil_eventop = {
     927             :         "nil",
     928             :         NULL, /* init: unused. */
     929             :         NULL, /* add: unused. */
     930             :         nil_backend_del, /* del: used, so needs to be killed. */
     931             :         NULL, /* dispatch: unused. */
     932             :         NULL, /* dealloc: unused. */
     933             :         0, 0, 0
     934             : };
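
The stub exists so the event_del() calls in event_reinit() (below) never reach a backend whose kernel state (an epoll or kqueue fd) is still shared with the parent process. Typical post-fork usage, as a sketch (fork_and_continue is illustrative; event_reinit() is the real API):

    #include <unistd.h>
    #include <event2/event.h>

    void
    fork_and_continue(struct event_base *base)
    {
        pid_t pid = fork();

        if (pid == 0) {
            /* Child: rebuild the backend before dispatching again, so
             * we stop sharing the parent's epoll/kqueue structures. */
            if (event_reinit(base) == -1)
                return; /* could not re-add every event */
            event_base_dispatch(base);
        }
        /* Parent continues with its own loop. */
    }
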
     935             : 
     936             : /* reinitialize the event base after a fork */
     937             : int
     938           0 : event_reinit(struct event_base *base)
     939             : {
     940             :         const struct eventop *evsel;
     941           0 :         int res = 0;
     942           0 :         int was_notifiable = 0;
     943           0 :         int had_signal_added = 0;
     944             : 
     945           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
     946             : 
     947           0 :         evsel = base->evsel;
     948             : 
     949             :         /* check if this event mechanism requires reinit on the backend */
     950           0 :         if (evsel->need_reinit) {
     951             :                 /* We're going to call event_del() on our notify events (the
      952             :                  * ones that tell us about signals and wakeup events).  But we
     953             :                  * don't actually want to tell the backend to change its
     954             :                  * state, since it might still share some resource (a kqueue,
     955             :                  * an epoll fd) with the parent process, and we don't want to
      956             :                  * delete the fds from _that_ backend, so we temporarily stub out
     957             :                  * the evsel with a replacement.
     958             :                  */
     959           0 :                 base->evsel = &nil_eventop;
     960             :         }
     961             : 
      962             :         /* We need to re-create the signal-notification fd and the
     963             :          * thread-notification fd.  Otherwise, we'll still share those with
     964             :          * the parent process, which would make any notification sent to them
     965             :          * get received by one or both of the event loops, more or less at
     966             :          * random.
     967             :          */
     968           0 :         if (base->sig.ev_signal_added) {
     969           0 :                 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
     970           0 :                 event_debug_unassign(&base->sig.ev_signal);
     971           0 :                 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
     972           0 :                 had_signal_added = 1;
     973           0 :                 base->sig.ev_signal_added = 0;
     974             :         }
     975           0 :         if (base->sig.ev_signal_pair[0] != -1)
     976           0 :                 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
     977           0 :         if (base->sig.ev_signal_pair[1] != -1)
     978           0 :                 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
     979           0 :         if (base->th_notify_fn != NULL) {
     980           0 :                 was_notifiable = 1;
     981           0 :                 base->th_notify_fn = NULL;
     982             :         }
     983           0 :         if (base->th_notify_fd[0] != -1) {
     984           0 :                 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
     985           0 :                 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
     986           0 :                 if (base->th_notify_fd[1] != -1)
     987           0 :                         EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
     988           0 :                 base->th_notify_fd[0] = -1;
     989           0 :                 base->th_notify_fd[1] = -1;
     990           0 :                 event_debug_unassign(&base->th_notify);
     991             :         }
     992             : 
     993             :         /* Replace the original evsel. */
     994           0 :         base->evsel = evsel;
     995             : 
     996           0 :         if (evsel->need_reinit) {
     997             :                 /* Reconstruct the backend through brute-force, so that we do
     998             :                  * not share any structures with the parent process. For some
     999             :                  * backends, this is necessary: epoll and kqueue, for
    1000             :                  * instance, have events associated with a kernel
     1001             :                  * structure. If we didn't reinitialize, we'd share that
    1002             :                  * structure with the parent process, and any changes made by
    1003             :                  * the parent would affect our backend's behavior (and vice
    1004             :                  * versa).
    1005             :                  */
    1006           0 :                 if (base->evsel->dealloc != NULL)
    1007           0 :                         base->evsel->dealloc(base);
    1008           0 :                 base->evbase = evsel->init(base);
    1009           0 :                 if (base->evbase == NULL) {
    1010           0 :                         event_errx(1,
    1011             :                            "%s: could not reinitialize event mechanism",
    1012             :                            __func__);
    1013             :                         res = -1;
    1014             :                         goto done;
    1015             :                 }
    1016             : 
    1017             :                 /* Empty out the changelist (if any): we are starting from a
    1018             :                  * blank slate. */
    1019           0 :                 event_changelist_freemem_(&base->changelist);
    1020             : 
    1021             :                 /* Tell the event maps to re-inform the backend about all
    1022             :                  * pending events. This will make the signal notification
    1023             :                  * event get re-created if necessary. */
    1024           0 :                 if (evmap_reinit_(base) < 0)
    1025           0 :                         res = -1;
    1026             :         } else {
    1027           0 :                 res = evsig_init_(base);
    1028           0 :                 if (res == 0 && had_signal_added) {
    1029           0 :                         res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
    1030           0 :                         if (res == 0)
    1031           0 :                                 base->sig.ev_signal_added = 1;
    1032             :                 }
    1033             :         }
    1034             : 
    1035             :         /* If we were notifiable before, and nothing just exploded, become
    1036             :          * notifiable again. */
    1037           0 :         if (was_notifiable && res == 0)
    1038           0 :                 res = evthread_make_base_notifiable_nolock_(base);
    1039             : 
    1040             : done:
    1041           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1042           0 :         return (res);
    1043             : }
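
/* Example (not part of event.c): a minimal POSIX sketch of the intended call
 * pattern for event_reinit() above -- after fork(), the child must call it
 * before using the inherited base, since backends like epoll and kqueue keep
 * kernel state that would otherwise stay shared with the parent.  The helper
 * name run_in_child is hypothetical. */
#include <event2/event.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int run_in_child(struct event_base *base)
{
        pid_t pid = fork();
        if (pid == 0) {
                /* Child: rebuild the backend state before dispatching. */
                if (event_reinit(base) == -1) {
                        fprintf(stderr, "event_reinit failed\n");
                        return -1;
                }
                return event_base_dispatch(base);
        }
        return 0; /* Parent keeps using its own view of the base. */
}
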
    1044             : 
     1045             : /* Get the monotonic time for this event_base's timer */
    1046             : int
    1047           0 : event_gettime_monotonic(struct event_base *base, struct timeval *tv)
    1048             : {
     1049           0 :         int rv = -1;
     1050             : 
     1051           0 :         if (base && tv) {
     1052           0 :                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
     1053           0 :                 rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
     1054           0 :                 EVBASE_RELEASE_LOCK(base, th_base_lock);
     1055             :         }
     1056             : 
     1057           0 :         return rv;
    1058             : }
    1059             : 
    1060             : const char **
    1061           0 : event_get_supported_methods(void)
    1062             : {
    1063             :         static const char **methods = NULL;
    1064             :         const struct eventop **method;
    1065             :         const char **tmp;
    1066           0 :         int i = 0, k;
    1067             : 
    1068             :         /* count all methods */
    1069           0 :         for (method = &eventops[0]; *method != NULL; ++method) {
    1070           0 :                 ++i;
    1071             :         }
    1072             : 
     1073             :         /* allocate one extra slot for the terminating NULL pointer */
    1074           0 :         tmp = mm_calloc((i + 1), sizeof(char *));
    1075           0 :         if (tmp == NULL)
    1076           0 :                 return (NULL);
    1077             : 
    1078             :         /* populate the array with the supported methods */
    1079           0 :         for (k = 0, i = 0; eventops[k] != NULL; ++k) {
    1080           0 :                 tmp[i++] = eventops[k]->name;
    1081             :         }
    1082           0 :         tmp[i] = NULL;
    1083             : 
    1084           0 :         if (methods != NULL)
    1085           0 :                 mm_free((char**)methods);
    1086             : 
    1087           0 :         methods = tmp;
    1088             : 
    1089           0 :         return (methods);
    1090             : }
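
/* Example (not part of event.c): a short sketch that lists every backend
 * compiled into this libevent build.  The returned array is owned by the
 * library and terminated by NULL; print_methods is a hypothetical name. */
#include <event2/event.h>
#include <stdio.h>

static void print_methods(void)
{
        const char **methods = event_get_supported_methods();
        int i;
        if (methods == NULL)
                return;
        for (i = 0; methods[i] != NULL; ++i)
                printf("compiled-in method: %s\n", methods[i]);
}
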
    1091             : 
    1092             : struct event_config *
    1093           3 : event_config_new(void)
    1094             : {
    1095           3 :         struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
    1096             : 
    1097           3 :         if (cfg == NULL)
    1098           0 :                 return (NULL);
    1099             : 
    1100           3 :         TAILQ_INIT(&cfg->entries);
    1101           3 :         cfg->max_dispatch_interval.tv_sec = -1;
    1102           3 :         cfg->max_dispatch_callbacks = INT_MAX;
    1103           3 :         cfg->limit_callbacks_after_prio = 1;
    1104             : 
    1105           3 :         return (cfg);
    1106             : }
    1107             : 
    1108             : static void
    1109           0 : event_config_entry_free(struct event_config_entry *entry)
    1110             : {
    1111           0 :         if (entry->avoid_method != NULL)
    1112           0 :                 mm_free((char *)entry->avoid_method);
    1113           0 :         mm_free(entry);
    1114           0 : }
    1115             : 
    1116             : void
    1117           3 : event_config_free(struct event_config *cfg)
    1118             : {
    1119             :         struct event_config_entry *entry;
    1120             : 
    1121           6 :         while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
    1122           0 :                 TAILQ_REMOVE(&cfg->entries, entry, next);
    1123           0 :                 event_config_entry_free(entry);
    1124             :         }
    1125           3 :         mm_free(cfg);
    1126           3 : }
    1127             : 
    1128             : int
    1129           0 : event_config_set_flag(struct event_config *cfg, int flag)
    1130             : {
    1131           0 :         if (!cfg)
    1132           0 :                 return -1;
    1133           0 :         cfg->flags |= flag;
    1134           0 :         return 0;
    1135             : }
    1136             : 
    1137             : int
    1138           0 : event_config_avoid_method(struct event_config *cfg, const char *method)
    1139             : {
    1140           0 :         struct event_config_entry *entry = mm_malloc(sizeof(*entry));
    1141           0 :         if (entry == NULL)
    1142           0 :                 return (-1);
    1143             : 
    1144           0 :         if ((entry->avoid_method = mm_strdup(method)) == NULL) {
    1145           0 :                 mm_free(entry);
    1146           0 :                 return (-1);
    1147             :         }
    1148             : 
    1149           0 :         TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
    1150             : 
    1151           0 :         return (0);
    1152             : }
    1153             : 
    1154             : int
    1155           0 : event_config_require_features(struct event_config *cfg,
    1156             :     int features)
    1157             : {
    1158           0 :         if (!cfg)
    1159           0 :                 return (-1);
    1160           0 :         cfg->require_features = features;
    1161           0 :         return (0);
    1162             : }
    1163             : 
    1164             : int
    1165           0 : event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
    1166             : {
    1167           0 :         if (!cfg)
    1168           0 :                 return (-1);
    1169           0 :         cfg->n_cpus_hint = cpus;
    1170           0 :         return (0);
    1171             : }
    1172             : 
    1173             : int
    1174           0 : event_config_set_max_dispatch_interval(struct event_config *cfg,
    1175             :     const struct timeval *max_interval, int max_callbacks, int min_priority)
    1176             : {
    1177           0 :         if (max_interval)
    1178           0 :                 memcpy(&cfg->max_dispatch_interval, max_interval,
    1179             :                     sizeof(struct timeval));
    1180             :         else
    1181           0 :                 cfg->max_dispatch_interval.tv_sec = -1;
    1182           0 :         cfg->max_dispatch_callbacks =
    1183           0 :             max_callbacks >= 0 ? max_callbacks : INT_MAX;
    1184           0 :         if (min_priority < 0)
    1185           0 :                 min_priority = 0;
    1186           0 :         cfg->limit_callbacks_after_prio = min_priority;
    1187           0 :         return (0);
    1188             : }
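
/* Example (not part of event.c): a minimal sketch combining the config
 * calls above -- build an event_base that skips select() and requires a
 * backend with O(1) add/del operations.  make_tuned_base is a hypothetical
 * helper name. */
#include <event2/event.h>

static struct event_base *make_tuned_base(void)
{
        struct event_base *base = NULL;
        struct event_config *cfg = event_config_new();
        if (cfg == NULL)
                return NULL;
        event_config_avoid_method(cfg, "select");
        event_config_require_features(cfg, EV_FEATURE_O1);
        base = event_base_new_with_config(cfg);
        event_config_free(cfg); /* The base keeps no reference to cfg. */
        return base;
}
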
    1189             : 
    1190             : int
    1191           0 : event_priority_init(int npriorities)
    1192             : {
    1193           0 :         return event_base_priority_init(current_base, npriorities);
    1194             : }
    1195             : 
    1196             : int
    1197           3 : event_base_priority_init(struct event_base *base, int npriorities)
    1198             : {
    1199             :         int i, r;
    1200           3 :         r = -1;
    1201             : 
    1202           3 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1203             : 
    1204           3 :         if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
    1205           3 :             || npriorities >= EVENT_MAX_PRIORITIES)
    1206             :                 goto err;
    1207             : 
    1208           3 :         if (npriorities == base->nactivequeues)
    1209           0 :                 goto ok;
    1210             : 
    1211           3 :         if (base->nactivequeues) {
    1212           0 :                 mm_free(base->activequeues);
    1213           0 :                 base->nactivequeues = 0;
    1214             :         }
    1215             : 
    1216             :         /* Allocate our priority queues */
    1217           3 :         base->activequeues = (struct evcallback_list *)
    1218           3 :           mm_calloc(npriorities, sizeof(struct evcallback_list));
    1219           3 :         if (base->activequeues == NULL) {
    1220           0 :                 event_warn("%s: calloc", __func__);
    1221           0 :                 goto err;
    1222             :         }
    1223           3 :         base->nactivequeues = npriorities;
    1224             : 
    1225           6 :         for (i = 0; i < base->nactivequeues; ++i) {
    1226           3 :                 TAILQ_INIT(&base->activequeues[i]);
    1227             :         }
    1228             : 
    1229             : ok:
    1230           3 :         r = 0;
    1231             : err:
    1232           3 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1233           3 :         return (r);
    1234             : }
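
/* Example (not part of event.c): a sketch of the expected call order for
 * the function above -- priorities must be configured before any events
 * are added or made active.  setup_priorities and the two event names are
 * hypothetical. */
#include <event2/event.h>

static int setup_priorities(struct event_base *base,
    struct event *urgent, struct event *bulk)
{
        /* Two queues: queue 0 is drained first (see event_process_active). */
        if (event_base_priority_init(base, 2) == -1)
                return -1;
        if (event_priority_set(urgent, 0) == -1)
                return -1;
        return event_priority_set(bulk, 1);
}
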
    1235             : 
    1236             : int
    1237           0 : event_base_get_npriorities(struct event_base *base)
    1238             : {
    1239             : 
    1240             :         int n;
    1241           0 :         if (base == NULL)
    1242           0 :                 base = current_base;
    1243             : 
    1244           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1245           0 :         n = base->nactivequeues;
    1246           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1247           0 :         return (n);
    1248             : }
    1249             : 
    1250             : int
    1251           0 : event_base_get_num_events(struct event_base *base, unsigned int type)
    1252             : {
    1253           0 :         int r = 0;
    1254             : 
    1255           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1256             : 
    1257           0 :         if (type & EVENT_BASE_COUNT_ACTIVE)
    1258           0 :                 r += base->event_count_active;
    1259             : 
    1260           0 :         if (type & EVENT_BASE_COUNT_VIRTUAL)
    1261           0 :                 r += base->virtual_event_count;
    1262             : 
    1263           0 :         if (type & EVENT_BASE_COUNT_ADDED)
    1264           0 :                 r += base->event_count;
    1265             : 
    1266           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1267             : 
    1268           0 :         return r;
    1269             : }
    1270             : 
    1271             : int
    1272           0 : event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
    1273             : {
    1274           0 :         int r = 0;
    1275             : 
    1276           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1277             : 
    1278           0 :         if (type & EVENT_BASE_COUNT_ACTIVE) {
    1279           0 :                 r += base->event_count_active_max;
    1280           0 :                 if (clear)
    1281           0 :                         base->event_count_active_max = 0;
    1282             :         }
    1283             : 
    1284           0 :         if (type & EVENT_BASE_COUNT_VIRTUAL) {
    1285           0 :                 r += base->virtual_event_count_max;
    1286           0 :                 if (clear)
    1287           0 :                         base->virtual_event_count_max = 0;
    1288             :         }
    1289             : 
    1290           0 :         if (type & EVENT_BASE_COUNT_ADDED) {
    1291           0 :                 r += base->event_count_max;
    1292           0 :                 if (clear)
    1293           0 :                         base->event_count_max = 0;
    1294             :         }
    1295             : 
    1296           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1297             : 
    1298           0 :         return r;
    1299             : }
    1300             : 
    1301             : /* Returns true iff we're currently watching any events. */
    1302             : static int
    1303         654 : event_haveevents(struct event_base *base)
    1304             : {
    1305             :         /* Caller must hold th_base_lock */
    1306         654 :         return (base->virtual_event_count > 0 || base->event_count > 0);
    1307             : }
    1308             : 
    1309             : /* "closure" function called when processing active signal events */
    1310             : static inline void
    1311           0 : event_signal_closure(struct event_base *base, struct event *ev)
    1312             : {
    1313             :         short ncalls;
    1314             :         int should_break;
    1315             : 
    1316             :         /* Allows deletes to work */
    1317           0 :         ncalls = ev->ev_ncalls;
    1318           0 :         if (ncalls != 0)
    1319           0 :                 ev->ev_pncalls = &ncalls;
    1320           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1321           0 :         while (ncalls) {
    1322           0 :                 ncalls--;
    1323           0 :                 ev->ev_ncalls = ncalls;
    1324           0 :                 if (ncalls == 0)
    1325           0 :                         ev->ev_pncalls = NULL;
    1326           0 :                 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
    1327             : 
    1328           0 :                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1329           0 :                 should_break = base->event_break;
    1330           0 :                 EVBASE_RELEASE_LOCK(base, th_base_lock);
    1331             : 
    1332           0 :                 if (should_break) {
    1333           0 :                         if (ncalls != 0)
    1334           0 :                                 ev->ev_pncalls = NULL;
    1335           0 :                         return;
    1336             :                 }
    1337             :         }
    1338             : }
    1339             : 
    1340             : /* Common timeouts are special timeouts that are handled as queues rather than
    1341             :  * in the minheap.  This is more efficient than the minheap if we happen to
     1342             :  * know that we're going to get several thousand timeout events all with
     1343             :  * the same timeout value.
     1344             :  *
     1345             :  * Since all our timeout handling code assumes timevals can be copied,
     1346             :  * assigned, etc., we can't use a "magic pointer" to encode these common
     1347             :  * timeouts.  Searching through a list to see if every timeout is common could
     1348             :  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
     1349             :  * is 32 bits long, but only uses 20 of those bits (since it can never be over
     1350             :  * 999999).  We use the top bits to encode 4 bits of magic number, and 8 bits
     1351             :  * of index into the event_base's array of common timeouts.
    1352             :  */
    1353             : 
    1354             : #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
    1355             : #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
    1356             : #define COMMON_TIMEOUT_IDX_SHIFT 20
    1357             : #define COMMON_TIMEOUT_MASK     0xf0000000
    1358             : #define COMMON_TIMEOUT_MAGIC    0x50000000
    1359             : 
    1360             : #define COMMON_TIMEOUT_IDX(tv) \
    1361             :         (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
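
/* Worked example (editorial, not in the original source): a common timeout
 * registered at index 3 with a 250000-microsecond component is encoded as
 *
 *     tv_usec = COMMON_TIMEOUT_MAGIC
 *             | (3 << COMMON_TIMEOUT_IDX_SHIFT)
 *             | 250000
 *             = 0x50000000 | 0x00300000 | 0x0003d090 = 0x5033d090,
 *
 * and COMMON_TIMEOUT_IDX() recovers the 3 by masking with 0x0ff00000 and
 * shifting right by 20 bits. */
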
    1362             : 
     1363             : /** Return true iff 'tv' is a common timeout in 'base' */
    1364             : static inline int
    1365           0 : is_common_timeout(const struct timeval *tv,
    1366             :     const struct event_base *base)
    1367             : {
    1368             :         int idx;
    1369           0 :         if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
    1370           0 :                 return 0;
    1371           0 :         idx = COMMON_TIMEOUT_IDX(tv);
    1372           0 :         return idx < base->n_common_timeouts;
    1373             : }
    1374             : 
    1375             : /* True iff tv1 and tv2 have the same common-timeout index, or if neither
    1376             :  * one is a common timeout. */
    1377             : static inline int
    1378           0 : is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
    1379             : {
    1380           0 :         return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
    1381           0 :             (tv2->tv_usec & ~MICROSECONDS_MASK);
    1382             : }
    1383             : 
    1384             : /** Requires that 'tv' is a common timeout.  Return the corresponding
    1385             :  * common_timeout_list. */
    1386             : static inline struct common_timeout_list *
    1387           0 : get_common_timeout_list(struct event_base *base, const struct timeval *tv)
    1388             : {
    1389           0 :         return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
    1390             : }
    1391             : 
    1392             : #if 0
    1393             : static inline int
    1394             : common_timeout_ok(const struct timeval *tv,
    1395             :     struct event_base *base)
    1396             : {
    1397             :         const struct timeval *expect =
    1398             :             &get_common_timeout_list(base, tv)->duration;
    1399             :         return tv->tv_sec == expect->tv_sec &&
    1400             :             tv->tv_usec == expect->tv_usec;
    1401             : }
    1402             : #endif
    1403             : 
     1404             : /* Add the timeout for the first event in the given common timeout list to the
    1405             :  * event_base's minheap. */
    1406             : static void
    1407           0 : common_timeout_schedule(struct common_timeout_list *ctl,
    1408             :     const struct timeval *now, struct event *head)
    1409             : {
    1410           0 :         struct timeval timeout = head->ev_timeout;
    1411           0 :         timeout.tv_usec &= MICROSECONDS_MASK;
    1412           0 :         event_add_nolock_(&ctl->timeout_event, &timeout, 1);
    1413           0 : }
    1414             : 
    1415             : /* Callback: invoked when the timeout for a common timeout queue triggers.
    1416             :  * This means that (at least) the first event in that queue should be run,
    1417             :  * and the timeout should be rescheduled if there are more events. */
    1418             : static void
    1419           0 : common_timeout_callback(evutil_socket_t fd, short what, void *arg)
    1420             : {
    1421             :         struct timeval now;
    1422           0 :         struct common_timeout_list *ctl = arg;
    1423           0 :         struct event_base *base = ctl->base;
    1424           0 :         struct event *ev = NULL;
    1425           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1426           0 :         gettime(base, &now);
    1427             :         while (1) {
    1428           0 :                 ev = TAILQ_FIRST(&ctl->events);
    1429           0 :                 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
    1430           0 :                     (ev->ev_timeout.tv_sec == now.tv_sec &&
    1431           0 :                         (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
    1432             :                         break;
    1433           0 :                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
    1434           0 :                 event_active_nolock_(ev, EV_TIMEOUT, 1);
    1435             :         }
    1436           0 :         if (ev)
    1437           0 :                 common_timeout_schedule(ctl, &now, ev);
    1438           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1439           0 : }
    1440             : 
    1441             : #define MAX_COMMON_TIMEOUTS 256
    1442             : 
    1443             : const struct timeval *
    1444           0 : event_base_init_common_timeout(struct event_base *base,
    1445             :     const struct timeval *duration)
    1446             : {
    1447             :         int i;
    1448             :         struct timeval tv;
    1449           0 :         const struct timeval *result=NULL;
    1450             :         struct common_timeout_list *new_ctl;
    1451             : 
    1452           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1453           0 :         if (duration->tv_usec > 1000000) {
    1454           0 :                 memcpy(&tv, duration, sizeof(struct timeval));
    1455           0 :                 if (is_common_timeout(duration, base))
    1456           0 :                         tv.tv_usec &= MICROSECONDS_MASK;
    1457           0 :                 tv.tv_sec += tv.tv_usec / 1000000;
    1458           0 :                 tv.tv_usec %= 1000000;
    1459           0 :                 duration = &tv;
    1460             :         }
    1461           0 :         for (i = 0; i < base->n_common_timeouts; ++i) {
    1462           0 :                 const struct common_timeout_list *ctl =
    1463           0 :                     base->common_timeout_queues[i];
    1464           0 :                 if (duration->tv_sec == ctl->duration.tv_sec &&
    1465           0 :                     duration->tv_usec ==
    1466           0 :                     (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
    1467           0 :                         EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
    1468           0 :                         result = &ctl->duration;
    1469           0 :                         goto done;
    1470             :                 }
    1471             :         }
    1472           0 :         if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
    1473           0 :                 event_warnx("%s: Too many common timeouts already in use; "
    1474             :                     "we only support %d per event_base", __func__,
    1475             :                     MAX_COMMON_TIMEOUTS);
    1476           0 :                 goto done;
    1477             :         }
    1478           0 :         if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
    1479           0 :                 int n = base->n_common_timeouts < 16 ? 16 :
    1480           0 :                     base->n_common_timeouts*2;
    1481           0 :                 struct common_timeout_list **newqueues =
    1482           0 :                     mm_realloc(base->common_timeout_queues,
    1483             :                         n*sizeof(struct common_timeout_queue *));
    1484           0 :                 if (!newqueues) {
    1485           0 :                         event_warn("%s: realloc",__func__);
    1486           0 :                         goto done;
    1487             :                 }
    1488           0 :                 base->n_common_timeouts_allocated = n;
    1489           0 :                 base->common_timeout_queues = newqueues;
    1490             :         }
    1491           0 :         new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
    1492           0 :         if (!new_ctl) {
    1493           0 :                 event_warn("%s: calloc",__func__);
    1494           0 :                 goto done;
    1495             :         }
    1496           0 :         TAILQ_INIT(&new_ctl->events);
    1497           0 :         new_ctl->duration.tv_sec = duration->tv_sec;
    1498           0 :         new_ctl->duration.tv_usec =
    1499           0 :             duration->tv_usec | COMMON_TIMEOUT_MAGIC |
    1500           0 :             (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
    1501           0 :         evtimer_assign(&new_ctl->timeout_event, base,
    1502             :             common_timeout_callback, new_ctl);
    1503           0 :         new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
    1504           0 :         event_priority_set(&new_ctl->timeout_event, 0);
    1505           0 :         new_ctl->base = base;
    1506           0 :         base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
    1507           0 :         result = &new_ctl->duration;
    1508             : 
    1509             : done:
    1510           0 :         if (result)
    1511           0 :                 EVUTIL_ASSERT(is_common_timeout(result, base));
    1512             : 
    1513           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1514           0 :         return result;
    1515             : }
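
/* Example (not part of event.c): a minimal sketch of how callers use the
 * function above -- register a shared 10-second duration once, then pass
 * the returned timeval, unmodified, to event_add() for every event that
 * should use it.  get_shared_timeout is a hypothetical helper name. */
#include <event2/event.h>

static const struct timeval *
get_shared_timeout(struct event_base *base)
{
        struct timeval ten_sec = { 10, 0 };
        /* The returned timeval is owned by the base; its tv_usec carries
         * the magic/index bits described above. */
        return event_base_init_common_timeout(base, &ten_sec);
}
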
    1516             : 
    1517             : /* Closure function invoked when we're activating a persistent event. */
    1518             : static inline void
    1519         649 : event_persist_closure(struct event_base *base, struct event *ev)
    1520             : {
    1521             :         void (*evcb_callback)(evutil_socket_t, short, void *);
    1522             : 
     1523             :         // Other fields of *ev that must be saved before executing the callback
    1524             :         evutil_socket_t evcb_fd;
    1525             :         short evcb_res;
    1526             :         void *evcb_arg;
    1527             : 
    1528             :         /* reschedule the persistent event if we have a timeout. */
    1529         649 :         if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
    1530             :                 /* If there was a timeout, we want it to run at an interval of
    1531             :                  * ev_io_timeout after the last time it was _scheduled_ for,
    1532             :                  * not ev_io_timeout after _now_.  If it fired for another
    1533             :                  * reason, though, the timeout ought to start ticking _now_. */
    1534             :                 struct timeval run_at, relative_to, delay, now;
    1535           0 :                 ev_uint32_t usec_mask = 0;
    1536           0 :                 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
    1537             :                         &ev->ev_io_timeout));
    1538           0 :                 gettime(base, &now);
    1539           0 :                 if (is_common_timeout(&ev->ev_timeout, base)) {
    1540           0 :                         delay = ev->ev_io_timeout;
    1541           0 :                         usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
    1542           0 :                         delay.tv_usec &= MICROSECONDS_MASK;
    1543           0 :                         if (ev->ev_res & EV_TIMEOUT) {
    1544           0 :                                 relative_to = ev->ev_timeout;
    1545           0 :                                 relative_to.tv_usec &= MICROSECONDS_MASK;
    1546             :                         } else {
    1547           0 :                                 relative_to = now;
    1548             :                         }
    1549             :                 } else {
    1550           0 :                         delay = ev->ev_io_timeout;
    1551           0 :                         if (ev->ev_res & EV_TIMEOUT) {
    1552           0 :                                 relative_to = ev->ev_timeout;
    1553             :                         } else {
    1554           0 :                                 relative_to = now;
    1555             :                         }
    1556             :                 }
    1557           0 :                 evutil_timeradd(&relative_to, &delay, &run_at);
    1558           0 :                 if (evutil_timercmp(&run_at, &now, <)) {
    1559             :                         /* Looks like we missed at least one invocation due to
    1560             :                          * a clock jump, not running the event loop for a
    1561             :                          * while, really slow callbacks, or
    1562             :                          * something. Reschedule relative to now.
    1563             :                          */
    1564           0 :                         evutil_timeradd(&now, &delay, &run_at);
    1565             :                 }
    1566           0 :                 run_at.tv_usec |= usec_mask;
    1567           0 :                 event_add_nolock_(ev, &run_at, 1);
    1568             :         }
    1569             : 
    1570             :         // Save our callback before we release the lock
    1571         649 :         evcb_callback = ev->ev_callback;
    1572         649 :         evcb_fd = ev->ev_fd;
    1573         649 :         evcb_res = ev->ev_res;
    1574         649 :         evcb_arg = ev->ev_arg;
    1575             : 
    1576             :         // Release the lock
    1577         649 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1578             : 
    1579             :         // Execute the callback
    1580         649 :         (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
    1581         649 : }
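
/* Example (not part of event.c): a sketch of the kind of event the closure
 * above services -- a persistent timer that libevent re-adds relative to its
 * previous deadline, so slow callbacks don't make the schedule drift.  tick
 * and start_ticker are hypothetical names. */
#include <event2/event.h>

static void tick(evutil_socket_t fd, short what, void *arg)
{
        (void)fd; (void)what; (void)arg;
        /* Invoked roughly once per second until the event is deleted. */
}

static struct event *start_ticker(struct event_base *base)
{
        struct timeval one_sec = { 1, 0 };
        struct event *ev = event_new(base, -1, EV_PERSIST, tick, NULL);
        if (ev != NULL && event_add(ev, &one_sec) == -1) {
                event_free(ev);
                ev = NULL;
        }
        return ev;
}
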
    1582             : 
    1583             : /*
    1584             :   Helper for event_process_active to process all the events in a single queue,
    1585             :   releasing the lock as we go.  This function requires that the lock be held
    1586             :   when it's invoked.  Returns -1 if we get a signal or an event_break that
    1587             :   means we should stop processing any active events now.  Otherwise returns
    1588             :   the number of non-internal event_callbacks that we processed.
    1589             : */
    1590             : static int
    1591         651 : event_process_active_single_queue(struct event_base *base,
    1592             :     struct evcallback_list *activeq,
    1593             :     int max_to_process, const struct timeval *endtime)
    1594             : {
    1595             :         struct event_callback *evcb;
    1596         651 :         int count = 0;
    1597             : 
    1598         651 :         EVUTIL_ASSERT(activeq != NULL);
    1599             : 
    1600         909 :         for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
    1601         655 :                 struct event *ev=NULL;
    1602         655 :                 if (evcb->evcb_flags & EVLIST_INIT) {
    1603         655 :                         ev = event_callback_to_event(evcb);
    1604             : 
    1605         655 :                         if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
    1606         649 :                                 event_queue_remove_active(base, evcb);
    1607             :                         else
    1608           6 :                                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
    1609         655 :                         event_debug((
    1610             :                             "event_process_active: event: %p, %s%s%scall %p",
    1611             :                             ev,
    1612             :                             ev->ev_res & EV_READ ? "EV_READ " : " ",
    1613             :                             ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
    1614             :                             ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
    1615             :                             ev->ev_callback));
    1616             :                 } else {
    1617           0 :                         event_queue_remove_active(base, evcb);
    1618           0 :                         event_debug(("event_process_active: event_callback %p, "
    1619             :                                 "closure %d, call %p",
    1620             :                                 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
    1621             :                 }
    1622             : 
    1623         655 :                 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
    1624         655 :                         ++count;
    1625             : 
    1626             : 
    1627         655 :                 base->current_event = evcb;
    1628             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    1629         655 :                 base->current_event_waiters = 0;
    1630             : #endif
    1631             : 
    1632         655 :                 switch (evcb->evcb_closure) {
    1633             :                 case EV_CLOSURE_EVENT_SIGNAL:
    1634           0 :                         EVUTIL_ASSERT(ev != NULL);
    1635           0 :                         event_signal_closure(base, ev);
    1636           0 :                         break;
    1637             :                 case EV_CLOSURE_EVENT_PERSIST:
    1638         649 :                         EVUTIL_ASSERT(ev != NULL);
    1639         649 :                         event_persist_closure(base, ev);
    1640         649 :                         break;
    1641             :                 case EV_CLOSURE_EVENT: {
    1642             :                         void (*evcb_callback)(evutil_socket_t, short, void *);
    1643           6 :                         EVUTIL_ASSERT(ev != NULL);
    1644           6 :                         evcb_callback = *ev->ev_callback;
    1645           6 :                         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1646           6 :                         evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
    1647             :                 }
    1648           6 :                 break;
    1649             :                 case EV_CLOSURE_CB_SELF: {
    1650           0 :                         void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
    1651           0 :                         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1652           0 :                         evcb_selfcb(evcb, evcb->evcb_arg);
    1653             :                 }
    1654           0 :                 break;
    1655             :                 case EV_CLOSURE_EVENT_FINALIZE:
    1656             :                 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
    1657             :                         void (*evcb_evfinalize)(struct event *, void *);
    1658           0 :                         int evcb_closure = evcb->evcb_closure;
    1659           0 :                         EVUTIL_ASSERT(ev != NULL);
    1660           0 :                         base->current_event = NULL;
    1661           0 :                         evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
    1662           0 :                         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
    1663           0 :                         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1664           0 :                         evcb_evfinalize(ev, ev->ev_arg);
    1665           0 :                         event_debug_note_teardown_(ev);
    1666           0 :                         if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
    1667           0 :                                 mm_free(ev);
    1668             :                 }
    1669           0 :                 break;
    1670             :                 case EV_CLOSURE_CB_FINALIZE: {
    1671           0 :                         void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
    1672           0 :                         base->current_event = NULL;
    1673           0 :                         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
    1674           0 :                         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1675           0 :                         evcb_cbfinalize(evcb, evcb->evcb_arg);
    1676             :                 }
    1677           0 :                 break;
    1678             :                 default:
    1679           0 :                         EVUTIL_ASSERT(0);
    1680             :                 }
    1681             : 
    1682         655 :                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1683         655 :                 base->current_event = NULL;
    1684             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    1685         655 :                 if (base->current_event_waiters) {
    1686           0 :                         base->current_event_waiters = 0;
    1687           0 :                         EVTHREAD_COND_BROADCAST(base->current_event_cond);
    1688             :                 }
    1689             : #endif
    1690             : 
    1691         655 :                 if (base->event_break)
    1692         397 :                         return -1;
    1693         258 :                 if (count >= max_to_process)
    1694           0 :                         return count;
    1695         258 :                 if (count && endtime) {
    1696             :                         struct timeval now;
    1697           0 :                         update_time_cache(base);
    1698           0 :                         gettime(base, &now);
    1699           0 :                         if (evutil_timercmp(&now, endtime, >=))
    1700           0 :                                 return count;
    1701             :                 }
    1702         258 :                 if (base->event_continue)
    1703           0 :                         break;
    1704             :         }
    1705         254 :         return count;
    1706             : }
    1707             : 
    1708             : /*
     1709             :  * Active events are stored in priority queues.  Lower-numbered priorities
     1710             :  * are always processed before higher-numbered ones, so a steady stream of
     1711             :  * events at a low-numbered (more urgent) priority can starve the others.
    1712             :  */
    1713             : 
    1714             : static int
    1715         651 : event_process_active(struct event_base *base)
    1716             : {
    1717             :         /* Caller must hold th_base_lock */
    1718         651 :         struct evcallback_list *activeq = NULL;
    1719         651 :         int i, c = 0;
    1720             :         const struct timeval *endtime;
    1721             :         struct timeval tv;
    1722         651 :         const int maxcb = base->max_dispatch_callbacks;
    1723         651 :         const int limit_after_prio = base->limit_callbacks_after_prio;
    1724         651 :         if (base->max_dispatch_time.tv_sec >= 0) {
    1725           0 :                 update_time_cache(base);
    1726           0 :                 gettime(base, &tv);
    1727           0 :                 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
    1728           0 :                 endtime = &tv;
    1729             :         } else {
    1730         651 :                 endtime = NULL;
    1731             :         }
    1732             : 
    1733         651 :         for (i = 0; i < base->nactivequeues; ++i) {
    1734         651 :                 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
    1735         651 :                         base->event_running_priority = i;
    1736         651 :                         activeq = &base->activequeues[i];
    1737         651 :                         if (i < limit_after_prio)
    1738         651 :                                 c = event_process_active_single_queue(base, activeq,
    1739             :                                     INT_MAX, NULL);
    1740             :                         else
    1741           0 :                                 c = event_process_active_single_queue(base, activeq,
    1742             :                                     maxcb, endtime);
    1743         651 :                         if (c < 0) {
    1744         397 :                                 goto done;
    1745         254 :                         } else if (c > 0)
    1746         254 :                                 break; /* Processed a real event; do not
    1747             :                                         * consider lower-priority events */
    1748             :                         /* If we get here, all of the events we processed
    1749             :                          * were internal.  Continue. */
    1750             :                 }
    1751             :         }
    1752             : 
    1753             : done:
    1754         651 :         base->event_running_priority = -1;
    1755             : 
    1756         651 :         return c;
    1757             : }
    1758             : 
    1759             : /*
    1760             :  * Wait continuously for events.  We exit only if no events are left.
    1761             :  */
    1762             : 
    1763             : int
    1764           0 : event_dispatch(void)
    1765             : {
    1766           0 :         return (event_loop(0));
    1767             : }
    1768             : 
    1769             : int
    1770           0 : event_base_dispatch(struct event_base *event_base)
    1771             : {
    1772           0 :         return (event_base_loop(event_base, 0));
    1773             : }
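
/* Example (not part of event.c): the canonical minimal program around
 * event_base_dispatch(), which is equivalent to event_base_loop(base, 0). */
#include <event2/event.h>

int main(void)
{
        struct event_base *base = event_base_new();
        if (base == NULL)
                return 1;
        /* ... create and add events here ... */
        event_base_dispatch(base); /* Returns once no events remain. */
        event_base_free(base);
        return 0;
}
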
    1774             : 
    1775             : const char *
    1776           0 : event_base_get_method(const struct event_base *base)
    1777             : {
    1778           0 :         EVUTIL_ASSERT(base);
    1779           0 :         return (base->evsel->name);
    1780             : }
    1781             : 
    1782             : /** Callback: used to implement event_base_loopexit by telling the event_base
    1783             :  * that it's time to exit its loop. */
    1784             : static void
    1785           0 : event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
    1786             : {
    1787           0 :         struct event_base *base = arg;
    1788           0 :         base->event_gotterm = 1;
    1789           0 : }
    1790             : 
    1791             : int
    1792           0 : event_loopexit(const struct timeval *tv)
    1793             : {
    1794           0 :         return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
    1795             :                     current_base, tv));
    1796             : }
    1797             : 
    1798             : int
    1799           0 : event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
    1800             : {
    1801           0 :         return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
    1802             :                     event_base, tv));
    1803             : }
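
/* Example (not part of event.c): a sketch asking the loop to exit after at
 * most two seconds.  Unlike event_base_loopbreak(), callbacks already active
 * in the current iteration still run before the loop returns.  The helper
 * name stop_after_two_seconds is hypothetical. */
#include <event2/event.h>

static int stop_after_two_seconds(struct event_base *base)
{
        struct timeval two_sec = { 2, 0 };
        return event_base_loopexit(base, &two_sec);
}
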
    1804             : 
    1805             : int
    1806           0 : event_loopbreak(void)
    1807             : {
    1808           0 :         return (event_base_loopbreak(current_base));
    1809             : }
    1810             : 
    1811             : int
    1812         397 : event_base_loopbreak(struct event_base *event_base)
    1813             : {
    1814         397 :         int r = 0;
    1815         397 :         if (event_base == NULL)
    1816           0 :                 return (-1);
    1817             : 
    1818         397 :         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    1819         397 :         event_base->event_break = 1;
    1820             : 
    1821         397 :         if (EVBASE_NEED_NOTIFY(event_base)) {
    1822           0 :                 r = evthread_notify_base(event_base);
    1823             :         } else {
    1824         397 :                 r = (0);
    1825             :         }
    1826         397 :         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
    1827         397 :         return r;
    1828             : }
    1829             : 
    1830             : int
    1831           0 : event_base_loopcontinue(struct event_base *event_base)
    1832             : {
    1833           0 :         int r = 0;
    1834           0 :         if (event_base == NULL)
    1835           0 :                 return (-1);
    1836             : 
    1837           0 :         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    1838           0 :         event_base->event_continue = 1;
    1839             : 
    1840           0 :         if (EVBASE_NEED_NOTIFY(event_base)) {
    1841           0 :                 r = evthread_notify_base(event_base);
    1842             :         } else {
    1843           0 :                 r = (0);
    1844             :         }
    1845           0 :         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
    1846           0 :         return r;
    1847             : }
    1848             : 
    1849             : int
    1850           0 : event_base_got_break(struct event_base *event_base)
    1851             : {
    1852             :         int res;
    1853           0 :         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    1854           0 :         res = event_base->event_break;
    1855           0 :         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
    1856           0 :         return res;
    1857             : }
    1858             : 
    1859             : int
    1860           0 : event_base_got_exit(struct event_base *event_base)
    1861             : {
    1862             :         int res;
    1863           0 :         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    1864           0 :         res = event_base->event_gotterm;
    1865           0 :         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
    1866           0 :         return res;
    1867             : }
    1868             : 
    1869             : /* not thread safe */
    1870             : 
    1871             : int
    1872           0 : event_loop(int flags)
    1873             : {
    1874           0 :         return event_base_loop(current_base, flags);
    1875             : }
    1876             : 
    1877             : int
    1878         654 : event_base_loop(struct event_base *base, int flags)
    1879             : {
    1880         654 :         const struct eventop *evsel = base->evsel;
    1881             :         struct timeval tv;
    1882             :         struct timeval *tv_p;
    1883         654 :         int res, done, retval = 0;
    1884             : 
    1885             :         /* Grab the lock.  We will release it inside evsel.dispatch, and again
    1886             :          * as we invoke user callbacks. */
    1887         654 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    1888             : 
    1889         654 :         if (base->running_loop) {
    1890           0 :                 event_warnx("%s: reentrant invocation.  Only one event_base_loop"
    1891             :                     " can run on each event_base at once.", __func__);
    1892           0 :                 EVBASE_RELEASE_LOCK(base, th_base_lock);
    1893           0 :                 return -1;
    1894             :         }
    1895             : 
    1896         654 :         base->running_loop = 1;
    1897             : 
    1898         654 :         clear_time_cache(base);
    1899             : 
    1900         654 :         if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
    1901           0 :                 evsig_set_base_(base);
    1902             : 
    1903         654 :         done = 0;
    1904             : 
    1905             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    1906         654 :         base->th_owner_id = EVTHREAD_GET_ID();
    1907             : #endif
    1908             : 
    1909         654 :         base->event_gotterm = base->event_break = 0;
    1910             : 
    1911        1959 :         while (!done) {
    1912         660 :                 base->event_continue = 0;
    1913         660 :                 base->n_deferreds_queued = 0;
    1914             : 
    1915             :                 /* Terminate the loop if we have been asked to */
    1916         660 :                 if (base->event_gotterm) {
    1917           0 :                         break;
    1918             :                 }
    1919             : 
    1920         660 :                 if (base->event_break) {
    1921           6 :                         break;
    1922             :                 }
    1923             : 
    1924         654 :                 tv_p = &tv;
    1925         654 :                 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
    1926         648 :                         timeout_next(base, &tv_p);
    1927             :                 } else {
    1928             :                         /*
    1929             :                          * if we have active events, we just poll new events
    1930             :                          * without waiting.
    1931             :                          */
    1932           6 :                         evutil_timerclear(&tv);
    1933             :                 }
    1934             : 
    1935             :                 /* If we have no events, we just exit */
    1936        1308 :                 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
    1937         654 :                     !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
    1938           0 :                         event_debug(("%s: no events registered.", __func__));
    1939           0 :                         retval = 1;
    1940           0 :                         goto done;
    1941             :                 }
    1942             : 
    1943         654 :                 event_queue_make_later_events_active(base);
    1944             : 
    1945         654 :                 clear_time_cache(base);
    1946             : 
    1947         654 :                 res = evsel->dispatch(base, tv_p);
    1948             : 
    1949         651 :                 if (res == -1) {
    1950           0 :                         event_debug(("%s: dispatch returned unsuccessfully.",
    1951             :                                 __func__));
    1952           0 :                         retval = -1;
    1953           0 :                         goto done;
    1954             :                 }
    1955             : 
    1956         651 :                 update_time_cache(base);
    1957             : 
    1958         651 :                 timeout_process(base);
    1959             : 
    1960         651 :                 if (N_ACTIVE_CALLBACKS(base)) {
    1961         651 :                         int n = event_process_active(base);
    1962         651 :                         if ((flags & EVLOOP_ONCE)
    1963         651 :                             && N_ACTIVE_CALLBACKS(base) == 0
    1964         645 :                             && n != 0)
    1965         645 :                                 done = 1;
    1966           0 :                 } else if (flags & EVLOOP_NONBLOCK)
    1967           0 :                         done = 1;
    1968             :         }
    1969         651 :         event_debug(("%s: asked to terminate loop.", __func__));
    1970             : 
    1971             : done:
    1972         651 :         clear_time_cache(base);
    1973         651 :         base->running_loop = 0;
    1974             : 
    1975         651 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    1976             : 
    1977         651 :         return (retval);
    1978             : }
    1979             : 
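                     : /* Example: a minimal sketch of driving this loop directly through the
                     :  * public API (error handling elided; not part of event.c):
                     :  *
                     :  *      #include <event2/event.h>
                     :  *
                     :  *      int main(void)
                     :  *      {
                     :  *              struct event_base *base = event_base_new();
                     :  *              if (!base)
                     :  *                      return 1;
                     :  *              // 0 = success, -1 = error, 1 = no events were registered.
                     :  *              int r = event_base_loop(base, EVLOOP_NONBLOCK);
                     :  *              event_base_free(base);
                     :  *              return r < 0;
                     :  *      }
                     :  *
                     :  * EVLOOP_ONCE instead blocks until at least one event fires and its
                     :  * callbacks run; EVLOOP_NO_EXIT_ON_EMPTY keeps looping even when no
                     :  * events remain. */
                     : 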
    1980             : /* One-time callback to implement event_base_once: invokes the user callback,
    1981             :  * then deletes the allocated storage */
    1982             : static void
    1983           0 : event_once_cb(evutil_socket_t fd, short events, void *arg)
    1984             : {
    1985           0 :         struct event_once *eonce = arg;
    1986             : 
    1987           0 :         (*eonce->cb)(fd, events, eonce->arg);
    1988           0 :         EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
    1989           0 :         LIST_REMOVE(eonce, next_once);
    1990           0 :         EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
    1991           0 :         event_debug_unassign(&eonce->ev);
    1992           0 :         mm_free(eonce);
    1993           0 : }
    1994             : 
    1995             : /* Not threadsafe.  Schedules an event to run once on the current base. */
    1996             : int
    1997           0 : event_once(evutil_socket_t fd, short events,
    1998             :     void (*callback)(evutil_socket_t, short, void *),
    1999             :     void *arg, const struct timeval *tv)
    2000             : {
    2001           0 :         return event_base_once(current_base, fd, events, callback, arg, tv);
    2002             : }
    2003             : 
    2004             : /* Schedules an event once */
    2005             : int
    2006           0 : event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    2007             :     void (*callback)(evutil_socket_t, short, void *),
    2008             :     void *arg, const struct timeval *tv)
    2009             : {
    2010             :         struct event_once *eonce;
    2011           0 :         int res = 0;
    2012           0 :         int activate = 0;
    2013             : 
    2014             :         /* We cannot support signals that just fire once, or persistent
    2015             :          * events. */
    2016           0 :         if (events & (EV_SIGNAL|EV_PERSIST))
    2017           0 :                 return (-1);
    2018             : 
    2019           0 :         if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
    2020           0 :                 return (-1);
    2021             : 
    2022           0 :         eonce->cb = callback;
    2023           0 :         eonce->arg = arg;
    2024             : 
    2025           0 :         if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
    2026           0 :                 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
    2027             : 
    2028           0 :                 if (tv == NULL || ! evutil_timerisset(tv)) {
    2029             :                         /* If the event is going to become active immediately,
    2030             :                          * don't put it on the timeout queue.  This is one
    2031             :                          * idiom for scheduling a callback, so let's make
    2032             :                          * it fast (and order-preserving). */
    2033           0 :                         activate = 1;
    2034             :                 }
    2035           0 :         } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
    2036           0 :                 events &= EV_READ|EV_WRITE|EV_CLOSED;
    2037             : 
    2038           0 :                 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
    2039             :         } else {
    2040             :                 /* Bad event combination */
    2041           0 :                 mm_free(eonce);
    2042           0 :                 return (-1);
    2043             :         }
    2044             : 
    2045           0 :         if (res == 0) {
    2046           0 :                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    2047           0 :                 if (activate)
    2048           0 :                         event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
    2049             :                 else
    2050           0 :                         res = event_add_nolock_(&eonce->ev, tv, 0);
    2051             : 
    2052           0 :                 if (res != 0) {
    2053           0 :                         mm_free(eonce);
                     :                         /* Don't leak the lock on this error path. */
                     :                         EVBASE_RELEASE_LOCK(base, th_base_lock);
    2054           0 :                         return (res);
    2055             :                 } else {
    2056           0 :                         LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
    2057             :                 }
    2058           0 :                 EVBASE_RELEASE_LOCK(base, th_base_lock);
    2059             :         }
    2060             : 
    2061           0 :         return (0);
    2062             : }
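                     : 
                     : /* Example: a one-shot callback via event_base_once().  The library
                     :  * frees its internal storage after the callback runs; `on_timeout`
                     :  * and `base` are illustrative names:
                     :  *
                     :  *      static void on_timeout(evutil_socket_t fd, short events, void *arg)
                     :  *      {
                     :  *              // fires once, one second after scheduling
                     :  *      }
                     :  *
                     :  *      struct timeval tv = { 1, 0 };
                     :  *      event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &tv);
                     :  *
                     :  * Passing a NULL or zero timeout activates the callback immediately;
                     :  * EV_SIGNAL and EV_PERSIST are rejected, as the code above shows. */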
    2063             : 
    2064             : int
    2065          43 : event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
    2066             : {
    2067          43 :         if (!base)
    2068          40 :                 base = current_base;
    2069          43 :         if (arg == &event_self_cbarg_ptr_)
    2070           0 :                 arg = ev;
    2071             : 
    2072          43 :         event_debug_assert_not_added_(ev);
    2073             : 
    2074          43 :         ev->ev_base = base;
    2075             : 
    2076          43 :         ev->ev_callback = callback;
    2077          43 :         ev->ev_arg = arg;
    2078          43 :         ev->ev_fd = fd;
    2079          43 :         ev->ev_events = events;
    2080          43 :         ev->ev_res = 0;
    2081          43 :         ev->ev_flags = EVLIST_INIT;
    2082          43 :         ev->ev_ncalls = 0;
    2083          43 :         ev->ev_pncalls = NULL;
    2084             : 
    2085          43 :         if (events & EV_SIGNAL) {
    2086           0 :                 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
    2087           0 :                         event_warnx("%s: EV_SIGNAL is not compatible with "
    2088             :                             "EV_READ, EV_WRITE or EV_CLOSED", __func__);
    2089           0 :                         return -1;
    2090             :                 }
    2091           0 :                 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
    2092             :         } else {
    2093          43 :                 if (events & EV_PERSIST) {
    2094          37 :                         evutil_timerclear(&ev->ev_io_timeout);
    2095          37 :                         ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
    2096             :                 } else {
    2097           6 :                         ev->ev_closure = EV_CLOSURE_EVENT;
    2098             :                 }
    2099             :         }
    2100             : 
    2101          43 :         min_heap_elem_init_(ev);
    2102             : 
    2103          43 :         if (base != NULL) {
    2104             :                 /* by default, we put new events into the middle priority */
    2105           3 :                 ev->ev_pri = base->nactivequeues / 2;
    2106             :         }
    2107             : 
    2108          43 :         event_debug_note_setup_(ev);
    2109             : 
    2110          43 :         return 0;
    2111             : }
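                     : 
                     : /* Example: a caller-allocated event set up with event_assign().  The
                     :  * event must not be pending or active when it is (re)assigned;
                     :  * `sock_fd` and `on_read` are illustrative:
                     :  *
                     :  *      struct event ev;
                     :  *      event_assign(&ev, base, sock_fd, EV_READ|EV_PERSIST, on_read, NULL);
                     :  *      event_add(&ev, NULL);
                     :  *
                     :  * Heap allocation via event_new() (below) avoids depending on
                     :  * sizeof(struct event) staying stable across library versions. */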
    2112             : 
    2113             : int
    2114          40 : event_base_set(struct event_base *base, struct event *ev)
    2115             : {
    2116             :         /* Only innocent events may be assigned to a different base */
    2117          40 :         if (ev->ev_flags != EVLIST_INIT)
    2118           0 :                 return (-1);
    2119             : 
    2120          40 :         event_debug_assert_is_setup_(ev);
    2121             : 
    2122          40 :         ev->ev_base = base;
    2123          40 :         ev->ev_pri = base->nactivequeues/2;
    2124             : 
    2125          40 :         return (0);
    2126             : }
    2127             : 
    2128             : void
    2129          40 : event_set(struct event *ev, evutil_socket_t fd, short events,
    2130             :           void (*callback)(evutil_socket_t, short, void *), void *arg)
    2131             : {
    2132             :         int r;
    2133          40 :         r = event_assign(ev, current_base, fd, events, callback, arg);
    2134          40 :         EVUTIL_ASSERT(r == 0);
    2135          40 : }
    2136             : 
    2137             : void *
    2138           0 : event_self_cbarg(void)
    2139             : {
    2140           0 :         return &event_self_cbarg_ptr_;
    2141             : }
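                     : 
                     : /* Example: event_self_cbarg() returns a sentinel that event_assign()
                     :  * (see above) swaps for the event itself, so a callback can refer to
                     :  * its own event, e.g. to delete it; `cb` is illustrative:
                     :  *
                     :  *      static void cb(evutil_socket_t fd, short events, void *arg)
                     :  *      {
                     :  *              struct event *self = arg;
                     :  *              event_del(self);
                     :  *      }
                     :  *
                     :  *      struct event *ev =
                     :  *          event_new(base, -1, EV_PERSIST, cb, event_self_cbarg());
                     :  */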
    2142             : 
    2143             : struct event *
    2144           0 : event_base_get_running_event(struct event_base *base)
    2145             : {
    2146           0 :         struct event *ev = NULL;
    2147           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    2148           0 :         if (EVBASE_IN_THREAD(base)) {
    2149           0 :                 struct event_callback *evcb = base->current_event;
    2150           0 :                 if (evcb->evcb_flags & EVLIST_INIT)
    2151           0 :                         ev = event_callback_to_event(evcb);
    2152             :         }
    2153           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    2154           0 :         return ev;
    2155             : }
    2156             : 
    2157             : struct event *
    2158           0 : event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
    2159             : {
    2160             :         struct event *ev;
    2161           0 :         ev = mm_malloc(sizeof(struct event));
    2162           0 :         if (ev == NULL)
    2163           0 :                 return (NULL);
    2164           0 :         if (event_assign(ev, base, fd, events, cb, arg) < 0) {
    2165           0 :                 mm_free(ev);
    2166           0 :                 return (NULL);
    2167             :         }
    2168             : 
    2169           0 :         return (ev);
    2170             : }
    2171             : 
    2172             : void
    2173           0 : event_free(struct event *ev)
    2174             : {
    2175             :         /* This is disabled so that events which have been finalized
    2176             :          * remain a valid target for event_free(). */
    2177             :         // event_debug_assert_is_setup_(ev);
    2178             : 
    2179             :         /* make sure that this event won't be coming back to haunt us. */
    2180           0 :         event_del(ev);
    2181           0 :         event_debug_note_teardown_(ev);
    2182           0 :         mm_free(ev);
    2183             : 
    2184           0 : }
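                     : 
                     : /* Example: the usual heap lifecycle, assuming an `on_read` callback:
                     :  *
                     :  *      struct event *ev =
                     :  *          event_new(base, fd, EV_READ|EV_PERSIST, on_read, NULL);
                     :  *      if (ev) {
                     :  *              event_add(ev, NULL);
                     :  *              // ... run the loop ...
                     :  *              event_free(ev);   // deletes the event first, as above
                     :  *      }
                     :  */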
    2185             : 
    2186             : void
    2187           0 : event_debug_unassign(struct event *ev)
    2188             : {
    2189           0 :         event_debug_assert_not_added_(ev);
    2190           0 :         event_debug_note_teardown_(ev);
    2191             : 
    2192           0 :         ev->ev_flags &= ~EVLIST_INIT;
    2193           0 : }
    2194             : 
    2195             : #define EVENT_FINALIZE_FREE_ 0x10000
    2196             : static int
    2197           0 : event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    2198             : {
    2199           0 :         ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
    2200             :             EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
    2201             : 
    2202           0 :         event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
    2203           0 :         ev->ev_closure = closure;
    2204           0 :         ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
    2205           0 :         event_active_nolock_(ev, EV_FINALIZE, 1);
    2206           0 :         ev->ev_flags |= EVLIST_FINALIZING;
    2207           0 :         return 0;
    2208             : }
    2209             : 
    2210             : static int
    2211           0 : event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    2212             : {
    2213             :         int r;
    2214           0 :         struct event_base *base = ev->ev_base;
    2215           0 :         if (EVUTIL_FAILURE_CHECK(!base)) {
    2216           0 :                 event_warnx("%s: event has no event_base set.", __func__);
    2217           0 :                 return -1;
    2218             :         }
    2219             : 
    2220           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    2221           0 :         r = event_finalize_nolock_(base, flags, ev, cb);
    2222           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    2223           0 :         return r;
    2224             : }
    2225             : 
    2226             : int
    2227           0 : event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    2228             : {
    2229           0 :         return event_finalize_impl_(flags, ev, cb);
    2230             : }
    2231             : 
    2232             : int
    2233           0 : event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    2234             : {
    2235           0 :         return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
    2236             : }
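                     : 
                     : /* Example: freeing an event whose callback may be running in another
                     :  * thread.  A sketch, with `on_finalize` illustrative:
                     :  *
                     :  *      static void on_finalize(struct event *ev, void *arg)
                     :  *      {
                     :  *              // runs once the event can no longer fire
                     :  *              free(arg);
                     :  *      }
                     :  *
                     :  *      event_free_finalize(0, ev, on_finalize);  // also frees ev itself
                     :  */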
    2237             : 
    2238             : void
    2239           0 : event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
    2240             : {
    2241           0 :         struct event *ev = NULL;
    2242           0 :         if (evcb->evcb_flags & EVLIST_INIT) {
    2243           0 :                 ev = event_callback_to_event(evcb);
    2244           0 :                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
    2245             :         } else {
    2246           0 :                 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
    2247             :         }
    2248             : 
    2249           0 :         evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
    2250           0 :         evcb->evcb_cb_union.evcb_cbfinalize = cb;
    2251           0 :         event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
    2252           0 :         evcb->evcb_flags |= EVLIST_FINALIZING;
    2253           0 : }
    2254             : 
    2255             : void
    2256           0 : event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
    2257             : {
    2258           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    2259           0 :         event_callback_finalize_nolock_(base, flags, evcb, cb);
    2260           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    2261           0 : }
    2262             : 
    2263             : /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
    2264             :  * callback will be invoked on *one of them*, after they have *all* been
    2265             :  * finalized. */
    2266             : int
    2267           0 : event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
    2268             : {
    2269           0 :         int n_pending = 0, i;
    2270             : 
    2271           0 :         if (base == NULL)
    2272           0 :                 base = current_base;
    2273             : 
    2274           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    2275             : 
    2276           0 :         event_debug(("%s: %d events finalizing", __func__, n_cbs));
    2277             : 
    2278             :         /* At most one can be currently executing; the rest we just
    2279             :          * cancel... But we always make sure that the finalize callback
    2280             :          * runs. */
    2281           0 :         for (i = 0; i < n_cbs; ++i) {
    2282           0 :                 struct event_callback *evcb = evcbs[i];
    2283           0 :                 if (evcb == base->current_event) {
    2284           0 :                         event_callback_finalize_nolock_(base, 0, evcb, cb);
    2285           0 :                         ++n_pending;
    2286             :                 } else {
    2287           0 :                         event_callback_cancel_nolock_(base, evcb, 0);
    2288             :                 }
    2289             :         }
    2290             : 
    2291           0 :         if (n_pending == 0) {
    2292             :                 /* Just do the first one. */
    2293           0 :                 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
    2294             :         }
    2295             : 
    2296           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    2297           0 :         return 0;
    2298             : }
    2299             : 
    2300             : /*
    2301             :  * Sets the priority of an event.  Changing the priority fails if the
    2302             :  * event is already active.
    2303             :  */
    2304             : 
    2305             : int
    2306           3 : event_priority_set(struct event *ev, int pri)
    2307             : {
    2308           3 :         event_debug_assert_is_setup_(ev);
    2309             : 
    2310           3 :         if (ev->ev_flags & EVLIST_ACTIVE)
    2311           0 :                 return (-1);
    2312           3 :         if (pri < 0 || pri >= ev->ev_base->nactivequeues)
    2313           3 :                 return (-1);
    2314             : 
    2315           0 :         ev->ev_pri = pri;
    2316             : 
    2317           0 :         return (0);
    2318             : }
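                     : 
                     : /* Example: priorities must be configured on the base before events can
                     :  * use them; a sketch assuming an illustrative `cb`:
                     :  *
                     :  *      event_base_priority_init(base, 3);      // queues 0..2
                     :  *      struct event *ev = event_new(base, fd, EV_READ, cb, NULL);
                     :  *      event_priority_set(ev, 0);              // 0 runs most urgently
                     :  *      event_add(ev, NULL);
                     :  *
                     :  * Without an explicit call, events land in the middle queue, as
                     :  * event_assign() above arranges. */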
    2319             : 
    2320             : /*
    2321             :  * Checks if a specific event is pending or scheduled.
    2322             :  */
    2323             : 
    2324             : int
    2325           0 : event_pending(const struct event *ev, short event, struct timeval *tv)
    2326             : {
    2327           0 :         int flags = 0;
    2328             : 
    2329           0 :         if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
    2330           0 :                 event_warnx("%s: event has no event_base set.", __func__);
    2331           0 :                 return 0;
    2332             :         }
    2333             : 
    2334           0 :         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
    2335           0 :         event_debug_assert_is_setup_(ev);
    2336             : 
    2337           0 :         if (ev->ev_flags & EVLIST_INSERTED)
    2338           0 :                 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
    2339           0 :         if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
    2340           0 :                 flags |= ev->ev_res;
    2341           0 :         if (ev->ev_flags & EVLIST_TIMEOUT)
    2342           0 :                 flags |= EV_TIMEOUT;
    2343             : 
    2344           0 :         event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
    2345             : 
    2346             :         /* See if there is a timeout that we should report */
    2347           0 :         if (tv != NULL && (flags & event & EV_TIMEOUT)) {
    2348           0 :                 struct timeval tmp = ev->ev_timeout;
    2349           0 :                 tmp.tv_usec &= MICROSECONDS_MASK;
    2350             :                 /* correctly remap to real time */
    2351           0 :                 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
    2352             :         }
    2353             : 
    2354           0 :         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
    2355             : 
    2356           0 :         return (flags & event);
    2357             : }
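                     : 
                     : /* Example: querying pending state; `expires` receives the absolute
                     :  * expiry time when EV_TIMEOUT is both requested and pending:
                     :  *
                     :  *      struct timeval expires;
                     :  *      int what = event_pending(ev, EV_READ|EV_TIMEOUT, &expires);
                     :  *      if (what & EV_TIMEOUT) {
                     :  *              // ev is on the timeout queue; expires says when
                     :  *      }
                     :  */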
    2358             : 
    2359             : int
    2360           0 : event_initialized(const struct event *ev)
    2361             : {
    2362           0 :         if (!(ev->ev_flags & EVLIST_INIT))
    2363           0 :                 return 0;
    2364             : 
    2365           0 :         return 1;
    2366             : }
    2367             : 
    2368             : void
    2369           0 : event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
    2370             : {
    2371           0 :         event_debug_assert_is_setup_(event);
    2372             : 
    2373           0 :         if (base_out)
    2374           0 :                 *base_out = event->ev_base;
    2375           0 :         if (fd_out)
    2376           0 :                 *fd_out = event->ev_fd;
    2377           0 :         if (events_out)
    2378           0 :                 *events_out = event->ev_events;
    2379           0 :         if (callback_out)
    2380           0 :                 *callback_out = event->ev_callback;
    2381           0 :         if (arg_out)
    2382           0 :                 *arg_out = event->ev_arg;
    2383           0 : }
    2384             : 
    2385             : size_t
    2386           0 : event_get_struct_event_size(void)
    2387             : {
    2388           0 :         return sizeof(struct event);
    2389             : }
    2390             : 
    2391             : evutil_socket_t
    2392           3 : event_get_fd(const struct event *ev)
    2393             : {
    2394           3 :         event_debug_assert_is_setup_(ev);
    2395           3 :         return ev->ev_fd;
    2396             : }
    2397             : 
    2398             : struct event_base *
    2399           0 : event_get_base(const struct event *ev)
    2400             : {
    2401           0 :         event_debug_assert_is_setup_(ev);
    2402           0 :         return ev->ev_base;
    2403             : }
    2404             : 
    2405             : short
    2406           0 : event_get_events(const struct event *ev)
    2407             : {
    2408           0 :         event_debug_assert_is_setup_(ev);
    2409           0 :         return ev->ev_events;
    2410             : }
    2411             : 
    2412             : event_callback_fn
    2413           0 : event_get_callback(const struct event *ev)
    2414             : {
    2415           0 :         event_debug_assert_is_setup_(ev);
    2416           0 :         return ev->ev_callback;
    2417             : }
    2418             : 
    2419             : void *
    2420           0 : event_get_callback_arg(const struct event *ev)
    2421             : {
    2422           0 :         event_debug_assert_is_setup_(ev);
    2423           0 :         return ev->ev_arg;
    2424             : }
    2425             : 
    2426             : int
    2427           0 : event_get_priority(const struct event *ev)
    2428             : {
    2429           0 :         event_debug_assert_is_setup_(ev);
    2430           0 :         return ev->ev_pri;
    2431             : }
    2432             : 
    2433             : int
    2434          40 : event_add(struct event *ev, const struct timeval *tv)
    2435             : {
    2436             :         int res;
    2437             : 
    2438          40 :         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
    2439           0 :                 event_warnx("%s: event has no event_base set.", __func__);
    2440           0 :                 return -1;
    2441             :         }
    2442             : 
    2443          40 :         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
    2444             : 
    2445          40 :         res = event_add_nolock_(ev, tv, 0);
    2446             : 
    2447          40 :         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
    2448             : 
    2449          40 :         return (res);
    2450             : }
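                     : 
                     : /* Example: re-adding a pending event does not duplicate it; with a
                     :  * timeout it simply re-arms the timer:
                     :  *
                     :  *      struct timeval tv = { 5, 0 };
                     :  *      event_add(ev, &tv);     // pend with a 5-second timeout
                     :  *      event_add(ev, &tv);     // still one event; timeout restarted
                     :  */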
    2451             : 
    2452             : /* Helper callback: wake an event_base from another thread.  This version
    2453             :  * works by writing a byte to one end of a socketpair, so that the event_base
    2454             :  * listening on the other end will wake up as the corresponding event
    2455             :  * triggers. */
    2456             : static int
    2457           0 : evthread_notify_base_default(struct event_base *base)
    2458             : {
    2459             :         char buf[1];
    2460             :         int r;
    2461           0 :         buf[0] = (char) 0;
    2462             : #ifdef _WIN32
    2463             :         r = send(base->th_notify_fd[1], buf, 1, 0);
    2464             : #else
    2465           0 :         r = write(base->th_notify_fd[1], buf, 1);
    2466             : #endif
    2467           0 :         return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
    2468             : }
    2469             : 
    2470             : #ifdef EVENT__HAVE_EVENTFD
    2471             : /* Helper callback: wake an event_base from another thread.  This version
    2472             :  * assumes that you have a working eventfd() implementation. */
    2473             : static int
    2474           0 : evthread_notify_base_eventfd(struct event_base *base)
    2475             : {
    2476           0 :         ev_uint64_t msg = 1;
    2477             :         int r;
    2478             :         do {
    2479           0 :                 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
    2480           0 :         } while (r < 0 && errno == EAGAIN);
    2481             : 
    2482           0 :         return (r < 0) ? -1 : 0;
    2483             : }
    2484             : #endif
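                     : 
                     : /* The same self-pipe idea in miniature (libevent wires this up
                     :  * internally; `drain_cb` is an illustrative callback that reads and
                     :  * discards the wakeup bytes):
                     :  *
                     :  *      evutil_socket_t fds[2];
                     :  *      evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
                     :  *      struct event *wake =
                     :  *          event_new(base, fds[0], EV_READ|EV_PERSIST, drain_cb, NULL);
                     :  *      event_add(wake, NULL);
                     :  *      // ... then, from any other thread:
                     :  *      send(fds[1], "", 1, 0);
                     :  */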
    2485             : 
    2486             : 
    2487             : /** Tell the thread currently running the event loop for base (if any) that it
    2488             :  * needs to stop waiting in its dispatch function (if it is waiting) and
    2489             :  * process all active callbacks. */
    2490             : static int
    2491           0 : evthread_notify_base(struct event_base *base)
    2492             : {
    2493           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    2494           0 :         if (!base->th_notify_fn)
    2495           0 :                 return -1;
    2496           0 :         if (base->is_notify_pending)
    2497           0 :                 return 0;
    2498           0 :         base->is_notify_pending = 1;
    2499           0 :         return base->th_notify_fn(base);
    2500             : }
    2501             : 
    2502             : /* Implementation function to remove a timeout on a currently pending event.
    2503             :  */
    2504             : int
    2505           0 : event_remove_timer_nolock_(struct event *ev)
    2506             : {
    2507           0 :         struct event_base *base = ev->ev_base;
    2508             : 
    2509           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    2510           0 :         event_debug_assert_is_setup_(ev);
    2511             : 
    2512           0 :         event_debug(("event_remove_timer_nolock: event: %p", ev));
    2513             : 
    2514             :         /* If it's not pending on a timeout, we don't need to do anything. */
    2515           0 :         if (ev->ev_flags & EVLIST_TIMEOUT) {
    2516           0 :                 event_queue_remove_timeout(base, ev);
    2517           0 :                 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
    2518             :         }
    2519             : 
    2520           0 :         return (0);
    2521             : }
    2522             : 
    2523             : int
    2524           0 : event_remove_timer(struct event *ev)
    2525             : {
    2526             :         int res;
    2527             : 
    2528           0 :         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
    2529           0 :                 event_warnx("%s: event has no event_base set.", __func__);
    2530           0 :                 return -1;
    2531             :         }
    2532             : 
    2533           0 :         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
    2534             : 
    2535           0 :         res = event_remove_timer_nolock_(ev);
    2536             : 
    2537           0 :         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
    2538             : 
    2539           0 :         return (res);
    2540             : }
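                     : 
                     : /* Example: keep an event pending on its fd but drop its deadline:
                     :  *
                     :  *      struct timeval tv = { 30, 0 };
                     :  *      event_add(ev, &tv);        // wait for EV_READ, give up after 30s
                     :  *      // ... later, once progress is certain:
                     :  *      event_remove_timer(ev);    // keep waiting, no more deadline
                     :  */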
    2541             : 
    2542             : /* Implementation function to add an event.  Works just like event_add,
    2543             :  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
    2544             :  * we treat tv as an absolute time, not as an interval to add to the current
    2545             :  * time */
    2546             : int
    2547          40 : event_add_nolock_(struct event *ev, const struct timeval *tv,
    2548             :     int tv_is_absolute)
    2549             : {
    2550          40 :         struct event_base *base = ev->ev_base;
    2551          40 :         int res = 0;
    2552          40 :         int notify = 0;
    2553             : 
    2554          40 :         EVENT_BASE_ASSERT_LOCKED(base);
    2555          40 :         event_debug_assert_is_setup_(ev);
    2556             : 
    2557          40 :         event_debug((
    2558             :                  "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
    2559             :                  ev,
    2560             :                  EV_SOCK_ARG(ev->ev_fd),
    2561             :                  ev->ev_events & EV_READ ? "EV_READ " : " ",
    2562             :                  ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
    2563             :                  ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
    2564             :                  tv ? "EV_TIMEOUT " : " ",
    2565             :                  ev->ev_callback));
    2566             : 
    2567          40 :         EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
    2568             : 
    2569          40 :         if (ev->ev_flags & EVLIST_FINALIZING) {
    2570             :                 /* XXXX debug */
    2571           0 :                 return (-1);
    2572             :         }
    2573             : 
    2574             :         /*
    2575             :          * Prepare for timeout insertion further below; if any step
    2576             :          * fails, we must not change any state.
    2577             :          */
    2578          40 :         if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
    2579           0 :                 if (min_heap_reserve_(&base->timeheap,
    2580           0 :                         1 + min_heap_size_(&base->timeheap)) == -1)
    2581           0 :                         return (-1);  /* ENOMEM == errno */
    2582             :         }
    2583             : 
    2584             :         /* If the main thread is currently executing a signal event's
    2585             :          * callback, and we are not the main thread, then we want to wait
    2586             :          * until the callback is done before we mess with the event, or else
    2587             :          * we can race on ev_ncalls and ev_pncalls below. */
    2588             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    2589          43 :         if (base->current_event == event_to_event_callback(ev) &&
    2590           3 :             (ev->ev_events & EV_SIGNAL)
    2591           0 :             && !EVBASE_IN_THREAD(base)) {
    2592           0 :                 ++base->current_event_waiters;
    2593           0 :                 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
    2594             :         }
    2595             : #endif
    2596             : 
    2597          80 :         if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
    2598          40 :             !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
    2599          40 :                 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
    2600          40 :                         res = evmap_io_add_(base, ev->ev_fd, ev);
    2601           0 :                 else if (ev->ev_events & EV_SIGNAL)
    2602           0 :                         res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
    2603          40 :                 if (res != -1)
    2604          40 :                         event_queue_insert_inserted(base, ev);
    2605          40 :                 if (res == 1) {
    2606             :                         /* evmap says we need to notify the main thread. */
    2607          40 :                         notify = 1;
    2608          40 :                         res = 0;
    2609             :                 }
    2610             :         }
    2611             : 
    2612             :         /*
    2613             :          * we should change the timeout state only if the previous event
    2614             :          * addition succeeded.
    2615             :          */
    2616          40 :         if (res != -1 && tv != NULL) {
    2617             :                 struct timeval now;
    2618             :                 int common_timeout;
    2619             : #ifdef USE_REINSERT_TIMEOUT
    2620             :                 int was_common;
    2621             :                 int old_timeout_idx;
    2622             : #endif
    2623             : 
    2624             :                 /*
    2625             :                  * for persistent timeout events, we remember the
    2626             :                  * timeout value and re-add the event.
    2627             :                  *
    2628             :                  * If tv_is_absolute, this was already set.
    2629             :                  */
    2630           0 :                 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
    2631           0 :                         ev->ev_io_timeout = *tv;
    2632             : 
    2633             : #ifndef USE_REINSERT_TIMEOUT
    2634           0 :                 if (ev->ev_flags & EVLIST_TIMEOUT) {
    2635           0 :                         event_queue_remove_timeout(base, ev);
    2636             :                 }
    2637             : #endif
    2638             : 
    2639             :                 /* Check if it is active due to a timeout.  Rescheduling
    2640             :                  * this timeout before the callback can be executed
    2641             :                  * removes it from the active list. */
    2642           0 :                 if ((ev->ev_flags & EVLIST_ACTIVE) &&
    2643           0 :                     (ev->ev_res & EV_TIMEOUT)) {
    2644           0 :                         if (ev->ev_events & EV_SIGNAL) {
    2645             :                         /* See if we are currently executing
    2646             :                          * this event's callback in a loop
    2647             :                          */
    2648           0 :                                 if (ev->ev_ncalls && ev->ev_pncalls) {
    2649             :                                         /* Abort loop */
    2650           0 :                                         *ev->ev_pncalls = 0;
    2651             :                                 }
    2652             :                         }
    2653             : 
    2654           0 :                         event_queue_remove_active(base, event_to_event_callback(ev));
    2655             :                 }
    2656             : 
    2657           0 :                 gettime(base, &now);
    2658             : 
    2659           0 :                 common_timeout = is_common_timeout(tv, base);
    2660             : #ifdef USE_REINSERT_TIMEOUT
    2661             :                 was_common = is_common_timeout(&ev->ev_timeout, base);
    2662             :                 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
    2663             : #endif
    2664             : 
    2665           0 :                 if (tv_is_absolute) {
    2666           0 :                         ev->ev_timeout = *tv;
    2667           0 :                 } else if (common_timeout) {
    2668           0 :                         struct timeval tmp = *tv;
    2669           0 :                         tmp.tv_usec &= MICROSECONDS_MASK;
    2670           0 :                         evutil_timeradd(&now, &tmp, &ev->ev_timeout);
    2671           0 :                         ev->ev_timeout.tv_usec |=
    2672           0 :                             (tv->tv_usec & ~MICROSECONDS_MASK);
    2673             :                 } else {
    2674           0 :                         evutil_timeradd(&now, tv, &ev->ev_timeout);
    2675             :                 }
    2676             : 
    2677           0 :                 event_debug((
    2678             :                          "event_add: event %p, timeout in %d seconds %d useconds, call %p",
    2679             :                          ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
    2680             : 
    2681             : #ifdef USE_REINSERT_TIMEOUT
    2682             :                 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
    2683             : #else
    2684           0 :                 event_queue_insert_timeout(base, ev);
    2685             : #endif
    2686             : 
    2687           0 :                 if (common_timeout) {
    2688           0 :                         struct common_timeout_list *ctl =
    2689           0 :                             get_common_timeout_list(base, &ev->ev_timeout);
    2690           0 :                         if (ev == TAILQ_FIRST(&ctl->events)) {
    2691           0 :                                 common_timeout_schedule(ctl, &now, ev);
    2692             :                         }
    2693             :                 } else {
    2694           0 :                         struct event* top = NULL;
    2695             :                         /* See if the earliest timeout is now earlier than it
    2696             :                          * was before: if so, we will need to tell the main
    2697             :                          * thread to wake up earlier than it would otherwise.
    2698             :                          * We double check the timeout of the top element to
    2699             :                          * handle time distortions due to system suspension.
    2700             :                          */
    2701           0 :                         if (min_heap_elt_is_top_(ev))
    2702           0 :                                 notify = 1;
    2703           0 :                         else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
    2704           0 :                                          evutil_timercmp(&top->ev_timeout, &now, <))
    2705           0 :                                 notify = 1;
    2706             :                 }
    2707             :         }
    2708             : 
    2709             :         /* if we are not in the right thread, we need to wake up the loop */
    2710          40 :         if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
    2711           0 :                 evthread_notify_base(base);
    2712             : 
    2713          40 :         event_debug_note_add_(ev);
    2714             : 
    2715          40 :         return (res);
    2716             : }
    2717             : 
    2718             : static int
    2719           3 : event_del_(struct event *ev, int blocking)
    2720             : {
    2721             :         int res;
    2722             : 
    2723           3 :         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
    2724           0 :                 event_warnx("%s: event has no event_base set.", __func__);
    2725           0 :                 return -1;
    2726             :         }
    2727             : 
    2728           3 :         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
    2729             : 
    2730           3 :         res = event_del_nolock_(ev, blocking);
    2731             : 
    2732           3 :         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
    2733             : 
    2734           3 :         return (res);
    2735             : }
    2736             : 
    2737             : int
    2738           3 : event_del(struct event *ev)
    2739             : {
    2740           3 :         return event_del_(ev, EVENT_DEL_AUTOBLOCK);
    2741             : }
    2742             : 
    2743             : int
    2744           0 : event_del_block(struct event *ev)
    2745             : {
    2746           0 :         return event_del_(ev, EVENT_DEL_BLOCK);
    2747             : }
    2748             : 
    2749             : int
    2750           0 : event_del_noblock(struct event *ev)
    2751             : {
    2752           0 :         return event_del_(ev, EVENT_DEL_NOBLOCK);
    2753             : }
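                     : 
                     : /* Example: deleting from a thread other than the one running the
                     :  * loop, so the callback's argument can be freed safely; `cb_arg` is
                     :  * illustrative:
                     :  *
                     :  *      event_del_block(ev);   // returns after any running callback ends
                     :  *      free(cb_arg);          // now unreachable from the callback
                     :  *
                     :  * event_del_noblock() skips that wait; plain event_del() blocks
                     :  * unless the event was added with EV_FINALIZE, per the helper below. */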
    2754             : 
    2755             : /** Helper for event_del: always called with th_base_lock held.
    2756             :  *
    2757             :  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
    2758             :  * EVEN_IF_FINALIZING} values. See those for more information.
    2759             :  */
    2760             : int
    2761           9 : event_del_nolock_(struct event *ev, int blocking)
    2762             : {
    2763             :         struct event_base *base;
    2764           9 :         int res = 0, notify = 0;
    2765             : 
    2766           9 :         event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
    2767             :                 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
    2768             : 
    2769             :         /* An event without a base has not been added */
    2770           9 :         if (ev->ev_base == NULL)
    2771           0 :                 return (-1);
    2772             : 
    2773           9 :         EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
    2774             : 
    2775           9 :         if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
    2776           9 :                 if (ev->ev_flags & EVLIST_FINALIZING) {
    2777             :                         /* XXXX Debug */
    2778           0 :                         return 0;
    2779             :                 }
    2780             :         }
    2781             : 
    2782             :         /* If the main thread is currently executing this event's callback,
    2783             :          * and we are not the main thread, then we want to wait until the
    2784             :          * callback is done before we start removing the event.  That way,
    2785             :          * when this function returns, it will be safe to free the
    2786             :          * user-supplied argument. */
    2787           9 :         base = ev->ev_base;
    2788             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    2789          12 :         if (blocking != EVENT_DEL_NOBLOCK &&
    2790           6 :             base->current_event == event_to_event_callback(ev) &&
    2791           3 :             !EVBASE_IN_THREAD(base) &&
    2792           0 :             (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
    2793           0 :                 ++base->current_event_waiters;
    2794           0 :                 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
    2795             :         }
    2796             : #endif
    2797             : 
    2798           9 :         EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
    2799             : 
    2800             :         /* See if we are currently executing this event's callback in a loop */
    2801           9 :         if (ev->ev_events & EV_SIGNAL) {
    2802           0 :                 if (ev->ev_ncalls && ev->ev_pncalls) {
    2803             :                         /* Abort loop */
    2804           0 :                         *ev->ev_pncalls = 0;
    2805             :                 }
    2806             :         }
    2807             : 
    2808           9 :         if (ev->ev_flags & EVLIST_TIMEOUT) {
    2809             :                 /* NOTE: We never need to notify the main thread because of a
    2810             :                  * deleted timeout event: all that could happen if we don't is
    2811             :                  * that the dispatch loop might wake up too early.  But the
    2812             :                  * point of notifying the main thread _is_ to wake up the
    2813             :                  * dispatch loop early anyway, so we wouldn't gain anything by
    2814             :                  * doing it.
    2815             :                  */
    2816           0 :                 event_queue_remove_timeout(base, ev);
    2817             :         }
    2818             : 
    2819           9 :         if (ev->ev_flags & EVLIST_ACTIVE)
    2820           6 :                 event_queue_remove_active(base, event_to_event_callback(ev));
    2821           3 :         else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
    2822           0 :                 event_queue_remove_active_later(base, event_to_event_callback(ev));
    2823             : 
    2824           9 :         if (ev->ev_flags & EVLIST_INSERTED) {
    2825           6 :                 event_queue_remove_inserted(base, ev);
    2826           6 :                 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
    2827           6 :                         res = evmap_io_del_(base, ev->ev_fd, ev);
    2828             :                 else
    2829           0 :                         res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
    2830           6 :                 if (res == 1) {
    2831             :                         /* evmap says we need to notify the main thread. */
    2832           6 :                         notify = 1;
    2833           6 :                         res = 0;
    2834             :                 }
    2835             :         }
    2836             : 
    2837             :         /* if we are not in the right thread, we need to wake up the loop */
    2838           9 :         if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
    2839           0 :                 evthread_notify_base(base);
    2840             : 
    2841           9 :         event_debug_note_del_(ev);
    2842             : 
    2843           9 :         return (res);
    2844             : }
    2845             : 
    2846             : void
    2847           0 : event_active(struct event *ev, int res, short ncalls)
    2848             : {
    2849           0 :         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
    2850           0 :                 event_warnx("%s: event has no event_base set.", __func__);
    2851           0 :                 return;
    2852             :         }
    2853             : 
    2854           0 :         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
    2855             : 
    2856           0 :         event_debug_assert_is_setup_(ev);
    2857             : 
    2858           0 :         event_active_nolock_(ev, res, ncalls);
    2859             : 
    2860           0 :         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
    2861             : }
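                     : 
                     : /* Example: injecting activation by hand, e.g. from another thread,
                     :  * without the fd actually becoming ready:
                     :  *
                     :  *      event_active(ev, EV_READ, 0);  // ncalls matters only for signals
                     :  *
                     :  * The callback sees EV_READ in its `events` argument on the next
                     :  * loop iteration. */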
    2862             : 
    2863             : 
    2864             : void
    2865         663 : event_active_nolock_(struct event *ev, int res, short ncalls)
    2866             : {
    2867             :         struct event_base *base;
    2868             : 
    2869         663 :         event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
    2870             :                 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
    2871             : 
    2872         663 :         base = ev->ev_base;
    2873         663 :         EVENT_BASE_ASSERT_LOCKED(base);
    2874             : 
    2875         663 :         if (ev->ev_flags & EVLIST_FINALIZING) {
    2876             :                 /* XXXX debug */
    2877           0 :                 return;
    2878             :         }
    2879             : 
    2880         663 :         switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
    2881             :         default:
    2882             :         case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
    2883           0 :                 EVUTIL_ASSERT(0);
    2884             :                 break;
    2885             :         case EVLIST_ACTIVE:
    2886             :                 /* Already active: merge the new result flags into the pending ones. */
    2887           8 :                 ev->ev_res |= res;
    2888           8 :                 return;
    2889             :         case EVLIST_ACTIVE_LATER:
    2890           0 :                 ev->ev_res |= res;
    2891           0 :                 break;
    2892             :         case 0:
    2893         655 :                 ev->ev_res = res;
    2894         655 :                 break;
    2895             :         }
    2896             : 
    2897         655 :         if (ev->ev_pri < base->event_running_priority)
    2898           0 :                 base->event_continue = 1;
    2899             : 
    2900         655 :         if (ev->ev_events & EV_SIGNAL) {
    2901             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    2902           0 :                 if (base->current_event == event_to_event_callback(ev) &&
    2903           0 :                     !EVBASE_IN_THREAD(base)) {
    2904           0 :                         ++base->current_event_waiters;
    2905           0 :                         EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
    2906             :                 }
    2907             : #endif
    2908           0 :                 ev->ev_ncalls = ncalls;
    2909           0 :                 ev->ev_pncalls = NULL;
    2910             :         }
    2911             : 
    2912         655 :         event_callback_activate_nolock_(base, event_to_event_callback(ev));
    2913             : }
    2914             : 
    2915             : void
    2916           0 : event_active_later_(struct event *ev, int res)
    2917             : {
    2918           0 :         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
    2919           0 :         event_active_later_nolock_(ev, res);
    2920           0 :         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
    2921           0 : }
    2922             : 
    2923             : void
    2924           0 : event_active_later_nolock_(struct event *ev, int res)
    2925             : {
    2926           0 :         struct event_base *base = ev->ev_base;
    2927           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    2928             : 
    2929           0 :         if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
    2930             :                 /* Already active: merge the new result flags into the pending ones. */
    2931           0 :                 ev->ev_res |= res;
    2932           0 :                 return;
    2933             :         }
    2934             : 
    2935           0 :         ev->ev_res = res;
    2936             : 
    2937           0 :         event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
    2938             : }
    2939             : 
    2940             : int
    2941           0 : event_callback_activate_(struct event_base *base,
    2942             :     struct event_callback *evcb)
    2943             : {
    2944             :         int r;
    2945           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    2946           0 :         r = event_callback_activate_nolock_(base, evcb);
    2947           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    2948           0 :         return r;
    2949             : }
    2950             : 
    2951             : int
    2952         655 : event_callback_activate_nolock_(struct event_base *base,
    2953             :     struct event_callback *evcb)
    2954             : {
    2955         655 :         int r = 1;
    2956             : 
    2957         655 :         if (evcb->evcb_flags & EVLIST_FINALIZING)
    2958           0 :                 return 0;
    2959             : 
    2960         655 :         switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
    2961             :         default:
    2962           0 :                 EVUTIL_ASSERT(0);
    2963             :         case EVLIST_ACTIVE_LATER:
    2964           0 :                 event_queue_remove_active_later(base, evcb);
    2965           0 :                 r = 0;
    2966           0 :                 break;
    2967             :         case EVLIST_ACTIVE:
    2968           0 :                 return 0;
    2969             :         case 0:
    2970         655 :                 break;
    2971             :         }
    2972             : 
    2973         655 :         event_queue_insert_active(base, evcb);
    2974             : 
    2975         655 :         if (EVBASE_NEED_NOTIFY(base))
    2976           0 :                 evthread_notify_base(base);
    2977             : 
    2978         655 :         return r;
    2979             : }
    2980             : 
    2981             : int
    2982           0 : event_callback_activate_later_nolock_(struct event_base *base,
    2983             :     struct event_callback *evcb)
    2984             : {
    2985           0 :         if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
    2986           0 :                 return 0;
    2987             : 
    2988           0 :         event_queue_insert_active_later(base, evcb);
    2989           0 :         if (EVBASE_NEED_NOTIFY(base))
    2990           0 :                 evthread_notify_base(base);
    2991           0 :         return 1;
    2992             : }
    2993             : 
    2994             : void
    2995           0 : event_callback_init_(struct event_base *base,
    2996             :     struct event_callback *cb)
    2997             : {
    2998           0 :         memset(cb, 0, sizeof(*cb));
    2999           0 :         cb->evcb_pri = base->nactivequeues - 1;
    3000           0 : }
    3001             : 
    3002             : int
    3003           0 : event_callback_cancel_(struct event_base *base,
    3004             :     struct event_callback *evcb)
    3005             : {
    3006             :         int r;
    3007           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3008           0 :         r = event_callback_cancel_nolock_(base, evcb, 0);
    3009           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3010           0 :         return r;
    3011             : }
    3012             : 
    3013             : int
    3014           0 : event_callback_cancel_nolock_(struct event_base *base,
    3015             :     struct event_callback *evcb, int even_if_finalizing)
    3016             : {
    3017           0 :         if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
    3018           0 :                 return 0;
    3019             : 
    3020           0 :         if (evcb->evcb_flags & EVLIST_INIT)
    3021           0 :                 return event_del_nolock_(event_callback_to_event(evcb),
    3022             :                     even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
    3023             : 
    3024           0 :         switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
    3025             :         default:
    3026             :         case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
    3027           0 :                 EVUTIL_ASSERT(0);
    3028             :                 break;
    3029             :         case EVLIST_ACTIVE:
     3030             :                 /* The callback is on the active queue; just remove it. */
    3031           0 :                 event_queue_remove_active(base, evcb);
    3032           0 :                 return 0;
    3033             :         case EVLIST_ACTIVE_LATER:
    3034           0 :                 event_queue_remove_active_later(base, evcb);
    3035           0 :                 break;
    3036             :         case 0:
    3037           0 :                 break;
    3038             :         }
    3039             : 
    3040           0 :         return 0;
    3041             : }
    3042             : 
    3043             : void
    3044           0 : event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
    3045             : {
    3046           0 :         memset(cb, 0, sizeof(*cb));
    3047           0 :         cb->evcb_cb_union.evcb_selfcb = fn;
    3048           0 :         cb->evcb_arg = arg;
    3049           0 :         cb->evcb_pri = priority;
    3050           0 :         cb->evcb_closure = EV_CLOSURE_CB_SELF;
    3051           0 : }
    3052             : 
    3053             : void
    3054           0 : event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
    3055             : {
    3056           0 :         cb->evcb_pri = priority;
    3057           0 : }
    3058             : 
    3059             : void
    3060           0 : event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
    3061             : {
    3062           0 :         if (!base)
    3063           0 :                 base = current_base;
    3064           0 :         event_callback_cancel_(base, cb);
    3065           0 : }
    3066             : 
    3067             : #define MAX_DEFERREDS_QUEUED 32
    3068             : int
    3069           0 : event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
    3070             : {
    3071           0 :         int r = 1;
    3072           0 :         if (!base)
    3073           0 :                 base = current_base;
    3074           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3075           0 :         if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
    3076           0 :                 r = event_callback_activate_later_nolock_(base, cb);
    3077             :         } else {
    3078           0 :                 r = event_callback_activate_nolock_(base, cb);
    3079           0 :                 if (r) {
    3080           0 :                         ++base->n_deferreds_queued;
    3081             :                 }
    3082             :         }
    3083           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3084           0 :         return r;
    3085             : }
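/*
 * A sketch of the throttle used in event_deferred_cb_schedule_() above,
 * reduced to plain C: past MAX_DEFERREDS_QUEUED deferreds in one pass, the
 * rest are parked on the active-later queue for the next loop iteration, so
 * a burst of deferred callbacks cannot starve I/O.  All names here are
 * hypothetical, not libevent API.
 */
#include <stddef.h>

#define MAX_BATCH 32

enum when { RUN_THIS_PASS, RUN_NEXT_PASS };

static enum when throttle_deferred(size_t *queued_this_pass)
{
	/* Beyond MAX_BATCH in a single pass, push the work to the next
	 * pass so the loop can get back to polling for I/O. */
	if (*queued_this_pass > MAX_BATCH)
		return RUN_NEXT_PASS;
	++*queued_this_pass;
	return RUN_THIS_PASS;
}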
    3086             : 
    3087             : static int
    3088         648 : timeout_next(struct event_base *base, struct timeval **tv_p)
    3089             : {
    3090             :         /* Caller must hold th_base_lock */
    3091             :         struct timeval now;
    3092             :         struct event *ev;
    3093         648 :         struct timeval *tv = *tv_p;
    3094         648 :         int res = 0;
    3095             : 
    3096         648 :         ev = min_heap_top_(&base->timeheap);
    3097             : 
    3098         648 :         if (ev == NULL) {
     3099             :                 /* if no time-based events are active, wait for I/O */
    3100         648 :                 *tv_p = NULL;
    3101         648 :                 goto out;
    3102             :         }
    3103             : 
    3104           0 :         if (gettime(base, &now) == -1) {
    3105           0 :                 res = -1;
    3106           0 :                 goto out;
    3107             :         }
    3108             : 
    3109           0 :         if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
    3110           0 :                 evutil_timerclear(tv);
    3111           0 :                 goto out;
    3112             :         }
    3113             : 
    3114           0 :         evutil_timersub(&ev->ev_timeout, &now, tv);
    3115             : 
    3116           0 :         EVUTIL_ASSERT(tv->tv_sec >= 0);
    3117           0 :         EVUTIL_ASSERT(tv->tv_usec >= 0);
    3118           0 :         event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
    3119             : 
    3120             : out:
    3121         648 :         return (res);
    3122             : }
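/*
 * timeout_next() above turns "earliest deadline in the min-heap" into "how
 * long the backend may block".  The same computation in isolation, using
 * the portable timer macros from <sys/time.h>; poll_timeout and its
 * parameters are hypothetical names, not libevent API.
 */
#include <stddef.h>
#include <sys/time.h>

static int poll_timeout(const struct timeval *deadline,
    struct timeval **wait_p, struct timeval *wait)
{
	struct timeval now;
	if (deadline == NULL) {
		*wait_p = NULL;			/* no timers: block indefinitely */
		return 0;
	}
	if (gettimeofday(&now, NULL) == -1)
		return -1;
	if (!timercmp(deadline, &now, >)) {	/* deadline <= now */
		timerclear(wait);		/* already due: do not block */
	} else {
		timersub(deadline, &now, wait);	/* block until the deadline */
	}
	*wait_p = wait;
	return 0;
}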
    3123             : 
    3124             : /* Activate every event whose timeout has elapsed. */
    3125             : static void
    3126         651 : timeout_process(struct event_base *base)
    3127             : {
    3128             :         /* Caller must hold lock. */
    3129             :         struct timeval now;
    3130             :         struct event *ev;
    3131             : 
    3132         651 :         if (min_heap_empty_(&base->timeheap)) {
    3133         651 :                 return;
    3134             :         }
    3135             : 
    3136           0 :         gettime(base, &now);
    3137             : 
    3138           0 :         while ((ev = min_heap_top_(&base->timeheap))) {
    3139           0 :                 if (evutil_timercmp(&ev->ev_timeout, &now, >))
    3140           0 :                         break;
    3141             : 
    3142             :                 /* delete this event from the I/O queues */
    3143           0 :                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
    3144             : 
    3145           0 :                 event_debug(("timeout_process: event: %p, call %p",
    3146             :                          ev, ev->ev_callback));
    3147           0 :                 event_active_nolock_(ev, EV_TIMEOUT, 1);
    3148             :         }
    3149             : }
    3150             : 
    3151             : #if (EVLIST_INTERNAL >> 4) != 1
    3152             : #error "Mismatch for value of EVLIST_INTERNAL"
    3153             : #endif
    3154             : 
    3155             : #ifndef MAX
    3156             : #define MAX(a,b) (((a)>(b))?(a):(b))
    3157             : #endif
    3158             : 
    3159             : #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
    3160             : 
    3161             : /* These are a fancy way to spell
     3162             :      if (!(flags & EVLIST_INTERNAL))
    3163             :          base->event_count--/++;
    3164             : */
    3165             : #define DECR_EVENT_COUNT(base,flags) \
    3166             :         ((base)->event_count -= (~((flags) >> 4) & 1))
    3167             : #define INCR_EVENT_COUNT(base,flags) do {                                       \
    3168             :         ((base)->event_count += (~((flags) >> 4) & 1));                            \
    3169             :         MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);            \
    3170             : } while (0)
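/*
 * Why (~((flags) >> 4) & 1) works: the #error check above pins
 * EVLIST_INTERNAL to bit 4, so shifting right by 4 leaves 1 for internal
 * events and 0 otherwise; negating flips that, and the counters move by 1
 * only for user-visible events, with no branch.  A quick standalone check
 * (check_internal_bit_trick is hypothetical, not part of this file):
 */
#include <assert.h>

static void check_internal_bit_trick(void)
{
	int internal = 0x10;	/* EVLIST_INTERNAL */
	int ordinary = 0x02;	/* some other flag bit */
	assert((~((internal) >> 4) & 1) == 0);	/* internal: count unchanged */
	assert((~((ordinary) >> 4) & 1) == 1);	/* normal: count moves by 1 */
}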
    3171             : 
    3172             : static void
    3173           6 : event_queue_remove_inserted(struct event_base *base, struct event *ev)
    3174             : {
    3175           6 :         EVENT_BASE_ASSERT_LOCKED(base);
    3176           6 :         if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
    3177           0 :                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
    3178             :                     ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
    3179             :                 return;
    3180             :         }
    3181           6 :         DECR_EVENT_COUNT(base, ev->ev_flags);
    3182           6 :         ev->ev_flags &= ~EVLIST_INSERTED;
    3183           6 : }
    3184             : static void
    3185         655 : event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
    3186             : {
    3187         655 :         EVENT_BASE_ASSERT_LOCKED(base);
    3188         655 :         if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
    3189           0 :                 event_errx(1, "%s: %p not on queue %x", __func__,
    3190             :                            evcb, EVLIST_ACTIVE);
    3191             :                 return;
    3192             :         }
    3193         655 :         DECR_EVENT_COUNT(base, evcb->evcb_flags);
    3194         655 :         evcb->evcb_flags &= ~EVLIST_ACTIVE;
    3195         655 :         base->event_count_active--;
    3196             : 
    3197         655 :         TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
    3198             :             evcb, evcb_active_next);
    3199         655 : }
    3200             : static void
    3201           0 : event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
    3202             : {
    3203           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    3204           0 :         if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
    3205           0 :                 event_errx(1, "%s: %p not on queue %x", __func__,
    3206             :                            evcb, EVLIST_ACTIVE_LATER);
    3207             :                 return;
    3208             :         }
    3209           0 :         DECR_EVENT_COUNT(base, evcb->evcb_flags);
    3210           0 :         evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
    3211           0 :         base->event_count_active--;
    3212             : 
    3213           0 :         TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
    3214           0 : }
    3215             : static void
    3216           0 : event_queue_remove_timeout(struct event_base *base, struct event *ev)
    3217             : {
    3218           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    3219           0 :         if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
    3220           0 :                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
    3221             :                     ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
    3222             :                 return;
    3223             :         }
    3224           0 :         DECR_EVENT_COUNT(base, ev->ev_flags);
    3225           0 :         ev->ev_flags &= ~EVLIST_TIMEOUT;
    3226             : 
    3227           0 :         if (is_common_timeout(&ev->ev_timeout, base)) {
    3228           0 :                 struct common_timeout_list *ctl =
    3229           0 :                     get_common_timeout_list(base, &ev->ev_timeout);
    3230           0 :                 TAILQ_REMOVE(&ctl->events, ev,
    3231             :                     ev_timeout_pos.ev_next_with_common_timeout);
    3232             :         } else {
    3233           0 :                 min_heap_erase_(&base->timeheap, ev);
    3234             :         }
    3235           0 : }
    3236             : 
    3237             : #ifdef USE_REINSERT_TIMEOUT
    3238             : /* Remove and reinsert 'ev' into the timeout queue. */
    3239             : static void
    3240             : event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    3241             :     int was_common, int is_common, int old_timeout_idx)
    3242             : {
    3243             :         struct common_timeout_list *ctl;
    3244             :         if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
    3245             :                 event_queue_insert_timeout(base, ev);
    3246             :                 return;
    3247             :         }
    3248             : 
    3249             :         switch ((was_common<<1) | is_common) {
    3250             :         case 3: /* Changing from one common timeout to another */
    3251             :                 ctl = base->common_timeout_queues[old_timeout_idx];
    3252             :                 TAILQ_REMOVE(&ctl->events, ev,
    3253             :                     ev_timeout_pos.ev_next_with_common_timeout);
    3254             :                 ctl = get_common_timeout_list(base, &ev->ev_timeout);
    3255             :                 insert_common_timeout_inorder(ctl, ev);
    3256             :                 break;
    3257             :         case 2: /* Was common; is no longer common */
    3258             :                 ctl = base->common_timeout_queues[old_timeout_idx];
    3259             :                 TAILQ_REMOVE(&ctl->events, ev,
    3260             :                     ev_timeout_pos.ev_next_with_common_timeout);
    3261             :                 min_heap_push_(&base->timeheap, ev);
    3262             :                 break;
    3263             :         case 1: /* Wasn't common; has become common. */
    3264             :                 min_heap_erase_(&base->timeheap, ev);
    3265             :                 ctl = get_common_timeout_list(base, &ev->ev_timeout);
    3266             :                 insert_common_timeout_inorder(ctl, ev);
    3267             :                 break;
    3268             :         case 0: /* was in heap; is still on heap. */
    3269             :                 min_heap_adjust_(&base->timeheap, ev);
    3270             :                 break;
    3271             :         default:
    3272             :                 EVUTIL_ASSERT(0); /* unreachable */
    3273             :                 break;
    3274             :         }
    3275             : }
    3276             : #endif
    3277             : 
     3278             : /* Add 'ev' to the common timeout list 'ctl', keeping the list sorted. */
    3279             : static void
    3280           0 : insert_common_timeout_inorder(struct common_timeout_list *ctl,
    3281             :     struct event *ev)
    3282             : {
    3283             :         struct event *e;
    3284             :         /* By all logic, we should just be able to append 'ev' to the end of
    3285             :          * ctl->events, since the timeout on each 'ev' is set to {the common
    3286             :          * timeout} + {the time when we add the event}, and so the events
     3287             :          * should arrive in order of their timeouts.  But just in case
     3288             :          * there's some wacky threading issue going on, we do a search from
     3289             :          * the end of 'ctl->events' to find the right insertion point.
    3290             :          */
    3291           0 :         TAILQ_FOREACH_REVERSE(e, &ctl->events,
    3292             :             event_list, ev_timeout_pos.ev_next_with_common_timeout) {
    3293             :                 /* This timercmp is a little sneaky, since both ev and e have
    3294             :                  * magic values in tv_usec.  Fortunately, they ought to have
    3295             :                  * the _same_ magic values in tv_usec.  Let's assert for that.
    3296             :                  */
    3297           0 :                 EVUTIL_ASSERT(
    3298             :                         is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
    3299           0 :                 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
    3300           0 :                         TAILQ_INSERT_AFTER(&ctl->events, e, ev,
    3301             :                             ev_timeout_pos.ev_next_with_common_timeout);
    3302           0 :                         return;
    3303             :                 }
    3304             :         }
    3305           0 :         TAILQ_INSERT_HEAD(&ctl->events, ev,
    3306             :             ev_timeout_pos.ev_next_with_common_timeout);
    3307             : }
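/*
 * These lists are reached through the public event_base_init_common_timeout(),
 * which hands back a "magic" timeval; adding an event with it lands in a
 * common_timeout_list here instead of the min-heap.  A minimal sketch
 * (add_with_common_timeout is a hypothetical helper):
 */
#include <event2/event.h>

static int add_with_common_timeout(struct event_base *base, struct event *ev)
{
	struct timeval five_seconds = { 5, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &five_seconds);
	if (common == NULL)
		return -1;
	return event_add(ev, common);	/* queued on the shared 5s list */
}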
    3308             : 
    3309             : static void
    3310          40 : event_queue_insert_inserted(struct event_base *base, struct event *ev)
    3311             : {
    3312          40 :         EVENT_BASE_ASSERT_LOCKED(base);
    3313             : 
    3314          40 :         if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
    3315           0 :                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
    3316             :                     ev, EV_SOCK_ARG(ev->ev_fd));
    3317             :                 return;
    3318             :         }
    3319             : 
    3320          40 :         INCR_EVENT_COUNT(base, ev->ev_flags);
    3321             : 
    3322          40 :         ev->ev_flags |= EVLIST_INSERTED;
    3323          40 : }
    3324             : 
    3325             : static void
    3326         655 : event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
    3327             : {
    3328         655 :         EVENT_BASE_ASSERT_LOCKED(base);
    3329             : 
    3330         655 :         if (evcb->evcb_flags & EVLIST_ACTIVE) {
    3331             :                 /* Double insertion is possible for active events */
    3332           0 :                 return;
    3333             :         }
    3334             : 
    3335         655 :         INCR_EVENT_COUNT(base, evcb->evcb_flags);
    3336             : 
    3337         655 :         evcb->evcb_flags |= EVLIST_ACTIVE;
    3338             : 
    3339         655 :         base->event_count_active++;
    3340         655 :         MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
    3341         655 :         EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
    3342         655 :         TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
    3343             :             evcb, evcb_active_next);
    3344             : }
    3345             : 
    3346             : static void
    3347           0 : event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
    3348             : {
    3349           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    3350           0 :         if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
    3351             :                 /* Double insertion is possible */
    3352           0 :                 return;
    3353             :         }
    3354             : 
    3355           0 :         INCR_EVENT_COUNT(base, evcb->evcb_flags);
    3356           0 :         evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
    3357           0 :         base->event_count_active++;
    3358           0 :         MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
    3359           0 :         EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
    3360           0 :         TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
    3361             : }
    3362             : 
    3363             : static void
    3364           0 : event_queue_insert_timeout(struct event_base *base, struct event *ev)
    3365             : {
    3366           0 :         EVENT_BASE_ASSERT_LOCKED(base);
    3367             : 
    3368           0 :         if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
    3369           0 :                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
    3370             :                     ev, EV_SOCK_ARG(ev->ev_fd));
    3371             :                 return;
    3372             :         }
    3373             : 
    3374           0 :         INCR_EVENT_COUNT(base, ev->ev_flags);
    3375             : 
    3376           0 :         ev->ev_flags |= EVLIST_TIMEOUT;
    3377             : 
    3378           0 :         if (is_common_timeout(&ev->ev_timeout, base)) {
    3379           0 :                 struct common_timeout_list *ctl =
    3380           0 :                     get_common_timeout_list(base, &ev->ev_timeout);
    3381           0 :                 insert_common_timeout_inorder(ctl, ev);
    3382             :         } else {
    3383           0 :                 min_heap_push_(&base->timeheap, ev);
    3384             :         }
    3385           0 : }
    3386             : 
    3387             : static void
    3388         654 : event_queue_make_later_events_active(struct event_base *base)
    3389             : {
    3390             :         struct event_callback *evcb;
    3391         654 :         EVENT_BASE_ASSERT_LOCKED(base);
    3392             : 
    3393        1308 :         while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
    3394           0 :                 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
    3395           0 :                 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
    3396           0 :                 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
    3397           0 :                 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
    3398           0 :                 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
    3399             :         }
    3400         654 : }
    3401             : 
    3402             : /* Functions for debugging */
    3403             : 
    3404             : const char *
    3405           0 : event_get_version(void)
    3406             : {
    3407           0 :         return (EVENT__VERSION);
    3408             : }
    3409             : 
    3410             : ev_uint32_t
    3411           0 : event_get_version_number(void)
    3412             : {
    3413           0 :         return (EVENT__NUMERIC_VERSION);
    3414             : }
    3415             : 
    3416             : /*
    3417             :  * No thread-safe interface needed - the information should be the same
    3418             :  * for all threads.
    3419             :  */
    3420             : 
    3421             : const char *
    3422           0 : event_get_method(void)
    3423             : {
    3424           0 :         return (current_base->evsel->name);
    3425             : }
    3426             : 
    3427             : #ifndef EVENT__DISABLE_MM_REPLACEMENT
    3428             : static void *(*mm_malloc_fn_)(size_t sz) = NULL;
    3429             : static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
    3430             : static void (*mm_free_fn_)(void *p) = NULL;
    3431             : 
    3432             : void *
    3433           0 : event_mm_malloc_(size_t sz)
    3434             : {
    3435           0 :         if (sz == 0)
    3436           0 :                 return NULL;
    3437             : 
    3438           0 :         if (mm_malloc_fn_)
    3439           0 :                 return mm_malloc_fn_(sz);
    3440             :         else
    3441           0 :                 return malloc(sz);
    3442             : }
    3443             : 
    3444             : void *
    3445          49 : event_mm_calloc_(size_t count, size_t size)
    3446             : {
    3447          49 :         if (count == 0 || size == 0)
    3448           0 :                 return NULL;
    3449             : 
    3450          49 :         if (mm_malloc_fn_) {
    3451           0 :                 size_t sz = count * size;
    3452           0 :                 void *p = NULL;
    3453           0 :                 if (count > EV_SIZE_MAX / size)
    3454           0 :                         goto error;
    3455           0 :                 p = mm_malloc_fn_(sz);
    3456           0 :                 if (p)
    3457           0 :                         return memset(p, 0, sz);
    3458             :         } else {
    3459          49 :                 void *p = calloc(count, size);
    3460             : #ifdef _WIN32
    3461             :                 /* Windows calloc doesn't reliably set ENOMEM */
    3462             :                 if (p == NULL)
    3463             :                         goto error;
    3464             : #endif
    3465          49 :                 return p;
    3466             :         }
    3467             : 
    3468             : error:
    3469           0 :         errno = ENOMEM;
    3470           0 :         return NULL;
    3471             : }
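/*
 * The "count > EV_SIZE_MAX / size" test in event_mm_calloc_() above is the
 * standard guard against count * size wrapping around.  The same check in
 * isolation, with SIZE_MAX standing in for EV_SIZE_MAX (calloc_checked is
 * a hypothetical name):
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *calloc_checked(size_t count, size_t size)
{
	void *p;
	if (count == 0 || size == 0)
		return NULL;
	if (count > SIZE_MAX / size)	/* count * size would overflow */
		return NULL;
	p = malloc(count * size);
	return p != NULL ? memset(p, 0, count * size) : NULL;
}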
    3472             : 
    3473             : char *
    3474           0 : event_mm_strdup_(const char *str)
    3475             : {
    3476           0 :         if (!str) {
    3477           0 :                 errno = EINVAL;
    3478           0 :                 return NULL;
    3479             :         }
    3480             : 
    3481           0 :         if (mm_malloc_fn_) {
    3482           0 :                 size_t ln = strlen(str);
    3483           0 :                 void *p = NULL;
    3484           0 :                 if (ln == EV_SIZE_MAX)
    3485           0 :                         goto error;
    3486           0 :                 p = mm_malloc_fn_(ln+1);
    3487           0 :                 if (p)
    3488           0 :                         return memcpy(p, str, ln+1);
    3489             :         } else
    3490             : #ifdef _WIN32
    3491             :                 return _strdup(str);
    3492             : #else
    3493           0 :                 return strdup(str);
    3494             : #endif
    3495             : 
    3496             : error:
    3497           0 :         errno = ENOMEM;
    3498           0 :         return NULL;
    3499             : }
    3500             : 
    3501             : void *
    3502           6 : event_mm_realloc_(void *ptr, size_t sz)
    3503             : {
    3504           6 :         if (mm_realloc_fn_)
    3505           0 :                 return mm_realloc_fn_(ptr, sz);
    3506             :         else
    3507           6 :                 return realloc(ptr, sz);
    3508             : }
    3509             : 
    3510             : void
    3511           3 : event_mm_free_(void *ptr)
    3512             : {
    3513           3 :         if (mm_free_fn_)
    3514           0 :                 mm_free_fn_(ptr);
    3515             :         else
    3516           3 :                 free(ptr);
    3517           3 : }
    3518             : 
    3519             : void
    3520           0 : event_set_mem_functions(void *(*malloc_fn)(size_t sz),
    3521             :                         void *(*realloc_fn)(void *ptr, size_t sz),
    3522             :                         void (*free_fn)(void *ptr))
    3523             : {
    3524           0 :         mm_malloc_fn_ = malloc_fn;
    3525           0 :         mm_realloc_fn_ = realloc_fn;
    3526           0 :         mm_free_fn_ = free_fn;
    3527           0 : }
    3528             : #endif
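/*
 * event_set_mem_functions() above is public API (declared in
 * <event2/event.h>) and must be called before libevent allocates anything.
 * A sketch with hypothetical counting wrappers; the live count is only
 * approximate, since realloc(NULL, n) also allocates and a failed malloc is
 * still counted:
 */
#include <event2/event.h>
#include <stdlib.h>

static size_t live_allocs;

static void *counting_malloc(size_t sz) { ++live_allocs; return malloc(sz); }
static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void counting_free(void *p) { if (p) --live_allocs; free(p); }

static void install_counting_allocator(void)
{
	event_set_mem_functions(counting_malloc,
	    counting_realloc, counting_free);
}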
    3529             : 
    3530             : #ifdef EVENT__HAVE_EVENTFD
    3531             : static void
    3532           0 : evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
    3533             : {
    3534             :         ev_uint64_t msg;
    3535             :         ev_ssize_t r;
    3536           0 :         struct event_base *base = arg;
    3537             : 
    3538           0 :         r = read(fd, (void*) &msg, sizeof(msg));
    3539           0 :         if (r<0 && errno != EAGAIN) {
    3540           0 :                 event_sock_warn(fd, "Error reading from eventfd");
    3541             :         }
    3542           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3543           0 :         base->is_notify_pending = 0;
    3544           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3545           0 : }
    3546             : #endif
    3547             : 
    3548             : static void
    3549           0 : evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
    3550             : {
    3551             :         unsigned char buf[1024];
    3552           0 :         struct event_base *base = arg;
    3553             : #ifdef _WIN32
    3554             :         while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
    3555             :                 ;
    3556             : #else
    3557           0 :         while (read(fd, (char*)buf, sizeof(buf)) > 0)
    3558             :                 ;
    3559             : #endif
    3560             : 
    3561           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3562           0 :         base->is_notify_pending = 0;
    3563           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3564           0 : }
    3565             : 
    3566             : int
    3567           0 : evthread_make_base_notifiable(struct event_base *base)
    3568             : {
    3569             :         int r;
    3570           0 :         if (!base)
    3571           0 :                 return -1;
    3572             : 
    3573           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3574           0 :         r = evthread_make_base_notifiable_nolock_(base);
    3575           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3576           0 :         return r;
    3577             : }
    3578             : 
    3579             : static int
    3580           0 : evthread_make_base_notifiable_nolock_(struct event_base *base)
    3581             : {
    3582             :         void (*cb)(evutil_socket_t, short, void *);
    3583             :         int (*notify)(struct event_base *);
    3584             : 
    3585           0 :         if (base->th_notify_fn != NULL) {
    3586             :                 /* The base is already notifiable: we're doing fine. */
    3587           0 :                 return 0;
    3588             :         }
    3589             : 
    3590             : #if defined(EVENT__HAVE_WORKING_KQUEUE)
    3591             :         if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
    3592             :                 base->th_notify_fn = event_kq_notify_base_;
    3593             :                 /* No need to add an event here; the backend can wake
    3594             :                  * itself up just fine. */
    3595             :                 return 0;
    3596             :         }
    3597             : #endif
    3598             : 
    3599             : #ifdef EVENT__HAVE_EVENTFD
    3600           0 :         base->th_notify_fd[0] = evutil_eventfd_(0,
    3601             :             EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
    3602           0 :         if (base->th_notify_fd[0] >= 0) {
    3603           0 :                 base->th_notify_fd[1] = -1;
    3604           0 :                 notify = evthread_notify_base_eventfd;
    3605           0 :                 cb = evthread_notify_drain_eventfd;
    3606             :         } else
    3607             : #endif
    3608           0 :         if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
    3609           0 :                 notify = evthread_notify_base_default;
    3610           0 :                 cb = evthread_notify_drain_default;
    3611             :         } else {
    3612           0 :                 return -1;
    3613             :         }
    3614             : 
    3615           0 :         base->th_notify_fn = notify;
    3616             : 
    3617             :         /* prepare an event that we can use for wakeup */
    3618           0 :         event_assign(&base->th_notify, base, base->th_notify_fd[0],
    3619             :                                  EV_READ|EV_PERSIST, cb, base);
    3620             : 
     3621             :         /* we need to mark this as an internal event */
    3622           0 :         base->th_notify.ev_flags |= EVLIST_INTERNAL;
    3623           0 :         event_priority_set(&base->th_notify, 0);
    3624             : 
    3625           0 :         return event_add_nolock_(&base->th_notify, NULL, 0);
    3626             : }
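/*
 * What the code above sets up is the classic "self-pipe" wakeup: a writable
 * fd that other threads poke so a blocked poll/epoll/kqueue call returns
 * (an eventfd where available, an internal pipe otherwise).  The bare
 * pattern, with hypothetical names and assuming both fds were made
 * non-blocking when the pipe was created:
 */
#include <unistd.h>

static int wake_fds[2];	/* e.g. from pipe(2); read end is polled by the loop */

/* Another thread: interrupt the loop blocked in its poll call. */
static void wake_loop(void)
{
	char byte = 0;
	(void)!write(wake_fds[1], &byte, 1);
}

/* The loop, once the read end polls readable: drain the signal. */
static void drain_wakeups(void)
{
	char buf[128];
	while (read(wake_fds[0], buf, sizeof(buf)) > 0)
		;	/* stops with EAGAIN because the fd is non-blocking */
}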
    3627             : 
    3628             : int
    3629           0 : event_base_foreach_event_nolock_(struct event_base *base,
    3630             :     event_base_foreach_event_cb fn, void *arg)
    3631             : {
    3632             :         int r, i;
    3633             :         unsigned u;
    3634             :         struct event *ev;
    3635             : 
    3636             :         /* Start out with all the EVLIST_INSERTED events. */
    3637           0 :         if ((r = evmap_foreach_event_(base, fn, arg)))
    3638           0 :                 return r;
    3639             : 
    3640             :         /* Okay, now we deal with those events that have timeouts and are in
    3641             :          * the min-heap. */
    3642           0 :         for (u = 0; u < base->timeheap.n; ++u) {
    3643           0 :                 ev = base->timeheap.p[u];
    3644           0 :                 if (ev->ev_flags & EVLIST_INSERTED) {
    3645             :                         /* we already processed this one */
    3646           0 :                         continue;
    3647             :                 }
    3648           0 :                 if ((r = fn(base, ev, arg)))
    3649           0 :                         return r;
    3650             :         }
    3651             : 
     3652             :         /* Now for the events in one of the common-timeout
     3653             :          * queues. */
    3654           0 :         for (i = 0; i < base->n_common_timeouts; ++i) {
    3655           0 :                 struct common_timeout_list *ctl =
    3656           0 :                     base->common_timeout_queues[i];
    3657           0 :                 TAILQ_FOREACH(ev, &ctl->events,
    3658             :                     ev_timeout_pos.ev_next_with_common_timeout) {
    3659           0 :                         if (ev->ev_flags & EVLIST_INSERTED) {
    3660             :                                 /* we already processed this one */
    3661           0 :                                 continue;
    3662             :                         }
    3663           0 :                         if ((r = fn(base, ev, arg)))
    3664           0 :                                 return r;
    3665             :                 }
    3666             :         }
    3667             : 
     3668             :         /* Finally, we deal with all the active events that we haven't touched
    3669             :          * yet. */
    3670           0 :         for (i = 0; i < base->nactivequeues; ++i) {
    3671             :                 struct event_callback *evcb;
    3672           0 :                 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
    3673           0 :                         if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
    3674             :                                 /* This isn't an event (evlist_init clear), or
     3675             :                                 /* This isn't an event (EVLIST_INIT clear), or
     3676             :                                  * we already processed it (EVLIST_INSERTED or
     3677             :                                  * EVLIST_TIMEOUT set). */
    3678             :                         }
    3679           0 :                         ev = event_callback_to_event(evcb);
    3680           0 :                         if ((r = fn(base, ev, arg)))
    3681           0 :                                 return r;
    3682             :                 }
    3683             :         }
    3684             : 
    3685           0 :         return 0;
    3686             : }
    3687             : 
    3688             : /* Helper for event_base_dump_events: called on each event in the event base;
    3689             :  * dumps only the inserted events. */
    3690             : static int
    3691           0 : dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
    3692             : {
    3693           0 :         FILE *output = arg;
    3694           0 :         const char *gloss = (e->ev_events & EV_SIGNAL) ?
    3695             :             "sig" : "fd ";
    3696             : 
    3697           0 :         if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
    3698           0 :                 return 0;
    3699             : 
    3700           0 :         fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
    3701             :             (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
    3702           0 :             (e->ev_events&EV_READ)?" Read":"",
    3703           0 :             (e->ev_events&EV_WRITE)?" Write":"",
    3704           0 :             (e->ev_events&EV_CLOSED)?" EOF":"",
    3705           0 :             (e->ev_events&EV_SIGNAL)?" Signal":"",
    3706           0 :             (e->ev_events&EV_PERSIST)?" Persist":"",
    3707           0 :             (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
    3708           0 :         if (e->ev_flags & EVLIST_TIMEOUT) {
    3709             :                 struct timeval tv;
    3710           0 :                 tv.tv_sec = e->ev_timeout.tv_sec;
    3711           0 :                 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
    3712           0 :                 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
    3713           0 :                 fprintf(output, " Timeout=%ld.%06d",
    3714           0 :                     (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
    3715             :         }
    3716           0 :         fputc('\n', output);
    3717             : 
    3718           0 :         return 0;
    3719             : }
    3720             : 
    3721             : /* Helper for event_base_dump_events: called on each event in the event base;
    3722             :  * dumps only the active events. */
    3723             : static int
    3724           0 : dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
    3725             : {
    3726           0 :         FILE *output = arg;
    3727           0 :         const char *gloss = (e->ev_events & EV_SIGNAL) ?
    3728             :             "sig" : "fd ";
    3729             : 
    3730           0 :         if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
    3731           0 :                 return 0;
    3732             : 
    3733           0 :         fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
    3734           0 :             (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
    3735           0 :             (e->ev_res&EV_READ)?" Read":"",
    3736           0 :             (e->ev_res&EV_WRITE)?" Write":"",
    3737           0 :             (e->ev_res&EV_CLOSED)?" EOF":"",
    3738           0 :             (e->ev_res&EV_SIGNAL)?" Signal":"",
    3739           0 :             (e->ev_res&EV_TIMEOUT)?" Timeout":"",
    3740           0 :             (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
    3741           0 :             (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
    3742             : 
    3743           0 :         return 0;
    3744             : }
    3745             : 
    3746             : int
    3747           0 : event_base_foreach_event(struct event_base *base,
    3748             :     event_base_foreach_event_cb fn, void *arg)
    3749             : {
    3750             :         int r;
    3751           0 :         if ((!fn) || (!base)) {
    3752           0 :                 return -1;
    3753             :         }
    3754           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3755           0 :         r = event_base_foreach_event_nolock_(base, fn, arg);
    3756           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3757           0 :         return r;
    3758             : }
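/*
 * event_base_foreach_event() above is public API; the callback must match
 * event_base_foreach_event_cb, and returning nonzero stops the walk.  A
 * sketch that counts pending events (count_one and count_events are
 * hypothetical names):
 */
#include <event2/event.h>

static int count_one(const struct event_base *base,
    const struct event *e, void *arg)
{
	(void)base; (void)e;
	++*(int *)arg;
	return 0;	/* keep walking */
}

static int count_events(struct event_base *base)
{
	int n = 0;
	if (event_base_foreach_event(base, count_one, &n) != 0)
		return -1;	/* bad arguments */
	return n;
}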
    3759             : 
    3760             : 
    3761             : void
    3762           0 : event_base_dump_events(struct event_base *base, FILE *output)
    3763             : {
    3764           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3765           0 :         fprintf(output, "Inserted events:\n");
    3766           0 :         event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
    3767             : 
    3768           0 :         fprintf(output, "Active events:\n");
    3769           0 :         event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
    3770           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3771           0 : }
    3772             : 
    3773             : void
    3774           0 : event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
    3775             : {
    3776           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3777           0 :         evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
    3778           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3779           0 : }
    3780             : 
    3781             : void
    3782           0 : event_base_active_by_signal(struct event_base *base, int sig)
    3783             : {
    3784           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3785           0 :         evmap_signal_active_(base, sig, 1);
    3786           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3787           0 : }
    3788             : 
    3789             : 
    3790             : void
    3791           0 : event_base_add_virtual_(struct event_base *base)
    3792             : {
    3793           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3794           0 :         base->virtual_event_count++;
    3795           0 :         MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
    3796           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3797           0 : }
    3798             : 
    3799             : void
    3800           0 : event_base_del_virtual_(struct event_base *base)
    3801             : {
    3802           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3803           0 :         EVUTIL_ASSERT(base->virtual_event_count > 0);
    3804           0 :         base->virtual_event_count--;
    3805           0 :         if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
    3806           0 :                 evthread_notify_base(base);
    3807           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3808           0 : }
    3809             : 
    3810             : static void
    3811           0 : event_free_debug_globals_locks(void)
    3812             : {
    3813             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    3814             : #ifndef EVENT__DISABLE_DEBUG_MODE
    3815           0 :         if (event_debug_map_lock_ != NULL) {
    3816           0 :                 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
    3817           0 :                 event_debug_map_lock_ = NULL;
    3818           0 :                 evthreadimpl_disable_lock_debugging_();
    3819             :         }
    3820             : #endif /* EVENT__DISABLE_DEBUG_MODE */
    3821             : #endif /* EVENT__DISABLE_THREAD_SUPPORT */
    3822           0 :         return;
    3823             : }
    3824             : 
    3825             : static void
    3826           0 : event_free_debug_globals(void)
    3827             : {
    3828           0 :         event_free_debug_globals_locks();
    3829           0 : }
    3830             : 
    3831             : static void
    3832           0 : event_free_evsig_globals(void)
    3833             : {
    3834           0 :         evsig_free_globals_();
    3835           0 : }
    3836             : 
    3837             : static void
    3838           0 : event_free_evutil_globals(void)
    3839             : {
    3840           0 :         evutil_free_globals_();
    3841           0 : }
    3842             : 
    3843             : static void
    3844           0 : event_free_globals(void)
    3845             : {
    3846           0 :         event_free_debug_globals();
    3847           0 :         event_free_evsig_globals();
    3848           0 :         event_free_evutil_globals();
    3849           0 : }
    3850             : 
    3851             : void
    3852           0 : libevent_global_shutdown(void)
    3853             : {
    3854           0 :         event_disable_debug_mode();
    3855           0 :         event_free_globals();
    3856           0 : }
    3857             : 
    3858             : #ifndef EVENT__DISABLE_THREAD_SUPPORT
    3859             : int
    3860           0 : event_global_setup_locks_(const int enable_locks)
    3861             : {
    3862             : #ifndef EVENT__DISABLE_DEBUG_MODE
    3863           0 :         EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
    3864             : #endif
    3865           0 :         if (evsig_global_setup_locks_(enable_locks) < 0)
    3866           0 :                 return -1;
    3867           0 :         if (evutil_global_setup_locks_(enable_locks) < 0)
    3868           0 :                 return -1;
    3869           0 :         if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
    3870           0 :                 return -1;
    3871           0 :         return 0;
    3872             : }
    3873             : #endif
    3874             : 
    3875             : void
    3876           0 : event_base_assert_ok_(struct event_base *base)
    3877             : {
    3878           0 :         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    3879           0 :         event_base_assert_ok_nolock_(base);
    3880           0 :         EVBASE_RELEASE_LOCK(base, th_base_lock);
    3881           0 : }
    3882             : 
    3883             : void
    3884           0 : event_base_assert_ok_nolock_(struct event_base *base)
    3885             : {
    3886             :         int i;
    3887             :         int count;
    3888             : 
    3889             :         /* First do checks on the per-fd and per-signal lists */
    3890           0 :         evmap_check_integrity_(base);
    3891             : 
    3892             :         /* Check the heap property */
    3893           0 :         for (i = 1; i < (int)base->timeheap.n; ++i) {
    3894           0 :                 int parent = (i - 1) / 2;
    3895             :                 struct event *ev, *p_ev;
    3896           0 :                 ev = base->timeheap.p[i];
    3897           0 :                 p_ev = base->timeheap.p[parent];
    3898           0 :                 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
    3899           0 :                 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
    3900           0 :                 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
    3901             :         }
    3902             : 
    3903             :         /* Check that the common timeouts are fine */
    3904           0 :         for (i = 0; i < base->n_common_timeouts; ++i) {
    3905           0 :                 struct common_timeout_list *ctl = base->common_timeout_queues[i];
    3906           0 :                 struct event *last=NULL, *ev;
    3907             : 
    3908           0 :                 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
    3909             : 
    3910           0 :                 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
    3911           0 :                         if (last)
    3912           0 :                                 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
    3913           0 :                         EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
    3914           0 :                         EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
    3915           0 :                         EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
    3916           0 :                         last = ev;
    3917             :                 }
    3918             :         }
    3919             : 
    3920             :         /* Check the active queues. */
    3921           0 :         count = 0;
    3922           0 :         for (i = 0; i < base->nactivequeues; ++i) {
    3923             :                 struct event_callback *evcb;
    3924           0 :                 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
    3925           0 :                 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
    3926           0 :                         EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
    3927           0 :                         EVUTIL_ASSERT(evcb->evcb_pri == i);
    3928           0 :                         ++count;
    3929             :                 }
    3930             :         }
    3931             : 
    3932             :         {
    3933             :                 struct event_callback *evcb;
    3934           0 :                 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
    3935           0 :                         EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
    3936           0 :                         ++count;
    3937             :                 }
    3938             :         }
    3939           0 :         EVUTIL_ASSERT(count == base->event_count_active);
    3940           0 : }
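/*
 * The heap check above relies on the array layout of a binary min-heap:
 * slot i's parent is slot (i - 1) / 2, and a parent's deadline may never
 * exceed its children's.  The same invariant checked over plain integers
 * (assert_min_heap is a hypothetical helper, not libevent code):
 */
#include <assert.h>
#include <stddef.h>

static void assert_min_heap(const long *deadline, size_t n)
{
	size_t i;
	for (i = 1; i < n; ++i)
		assert(deadline[(i - 1) / 2] <= deadline[i]);
}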

Generated by: LCOV version 1.13