/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EVENT_INTERNAL_H_INCLUDED_
#define EVENT_INTERNAL_H_INCLUDED_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include "evconfig-private.h"
#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"
/* map union members back */
/* mutually exclusive */
#define ev_signal_next ev_.ev_signal.ev_signal_next
#define ev_io_next ev_.ev_io.ev_io_next
#define ev_io_timeout ev_.ev_io.ev_timeout
/* used only by signals */
#define ev_ncalls ev_.ev_signal.ev_ncalls
#define ev_pncalls ev_.ev_signal.ev_pncalls
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2
/** Structure to define the backend of a given event_base. */
struct eventop {
/** The name of this backend. */
const char *name;
/** Function to set up an event_base to use this backend. It should
* create a new structure holding whatever information is needed to
* run the backend, and return it. The returned pointer will get
* stored by event_init into the event_base.evbase field. On failure,
* this function should return NULL. */
void *(*init)(struct event_base *);
/** Enable reading/writing on a given fd or signal. 'events' will be
* the events that we're trying to enable: one or more of EV_READ,
* EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
* were enabled on this fd previously. 'fdinfo' will be a structure
* associated with the fd by the evmap; its size is defined by the
fdinfo_len field below. It will be set to 0 the first time the fd is
* added. The function should return 0 on success and -1 on error.
*/
int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** As "add", except 'events' contains the events we mean to disable. */
int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** Function to implement the core of an event loop. It must see which
added events are ready, and cause event_active to be called for each
active event (usually via event_io_active or such). It should
return 0 on success and -1 on error.
*/
int (*dispatch)(struct event_base *, struct timeval *);
/** Function to clean up and free our data from the event_base. */
void (*dealloc)(struct event_base *);
/** Flag: set if we need to reinitialize the event base after we fork.
*/
int need_reinit;
/** Bit-array of supported event_method_features that this backend can
* provide. */
enum event_method_feature features;
/** Length of the extra information we should record for each fd that
has one or more active events. This information is recorded
as part of the evmap entry for each fd, and passed as an argument
to the add and del functions above.
*/
size_t fdinfo_len;
};
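/* As an illustration only (a hedged sketch, not part of libevent): a minimal
 * backend might fill in this structure roughly as below. All of the fake_*
 * names are hypothetical; real implementations live in select.c, poll.c,
 * epoll.c, and friends.
 *
 *   static void *
 *   fake_init(struct event_base *base)
 *   {
 *       return mm_calloc(1, sizeof(int));   allocate per-base backend state
 *   }
 *   static int
 *   fake_add(struct event_base *base, evutil_socket_t fd,
 *       short old, short events, void *fdinfo)
 *   {
 *       return 0;   a real backend would register fd for 'events' here
 *   }
 *   static int
 *   fake_del(struct event_base *base, evutil_socket_t fd,
 *       short old, short events, void *fdinfo)
 *   {
 *       return 0;
 *   }
 *   static int
 *   fake_dispatch(struct event_base *base, struct timeval *tv)
 *   {
 *       return 0;   a real backend would poll and activate ready events here
 *   }
 *   static void
 *   fake_dealloc(struct event_base *base)
 *   {
 *       mm_free(base->evbase);
 *   }
 *   const struct eventop fakeops = {
 *       "fake",
 *       fake_init,
 *       fake_add,
 *       fake_del,
 *       fake_dispatch,
 *       fake_dealloc,
 *       0,    (need_reinit)
 *       0,    (features)
 *       0     (fdinfo_len)
 *   };
 */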
#ifdef _WIN32
/* If we're on win32, then file descriptors are not nice, densely packed low
   integers. Instead, they are pointer-like Windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif
/* #define HT_CACHE_HASH_VALS */
#ifdef EVMAP_USE_HT
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif
/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
defined, this structure is also used as event_io_map, which maps fds to a
list of events.
*/
struct event_signal_map {
/* An array of evmap_io * or of evmap_signal *; empty entries are
* set to NULL. */
void **entries;
/* The number of entries available in entries */
int nentries;
};
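/* A hedged illustration of how the map is consulted: the index is the signal
 * number (or, without EVMAP_USE_HT, the fd), and a NULL entry means nothing
 * is registered there. The evmap_signal type itself is defined in evmap.c,
 * not in this header.
 *
 *   if (signo < base->sigmap.nentries &&
 *       base->sigmap.entries[signo] != NULL) {
 *       ... entries[signo] is the evmap_signal for this signal ...
 *   }
 */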
/* A list of events waiting on a given 'common' timeout value. Ordinarily,
* events waiting for a timeout wait on a minheap. Sometimes, however, a
* queue can be faster.
 */
struct common_timeout_list {
/* List of events currently waiting in the queue. */
struct event_list events;
/* 'magic' timeval used to indicate the duration of events in this
* queue. */
struct timeval duration;
/* Event that triggers whenever one of the events in the queue is
* ready to activate */
struct event timeout_event;
/* The event_base that this timeout list is part of */
struct event_base *base;
};
/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff
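/* Common-timeout durations are "magic" timevals: the real microsecond count
 * lives in the low bits of tv_usec, while the upper bits carry a flag and the
 * index of the owning queue (those other masks are defined in event.c, not
 * here). A hedged sketch of recovering the true duration:
 *
 *   usec = ctl->duration.tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK;
 */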
struct event_change;
/* List of 'changes' since the last call to eventop.dispatch. Only maintained
* if the backend is using changesets. */
struct event_changelist {
struct event_change *changes;
int n_changes;
int changes_size;
};
#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
struct event_base {
/** Function pointers and other data to describe this event_base's
* backend. */
const struct eventop *evsel;
/** Pointer to backend-specific data. */
void *evbase;
/** List of changes to tell backend about at next dispatch. Only used
* by the O(1) backends. */
struct event_changelist changelist;
/** Function pointers used to describe the backend that this event_base
* uses for signals */
const struct eventop *evsigsel;
/** Data to implement the common signal handler code. */
struct evsig_info sig;
/** Number of virtual events */
int virtual_event_count;
/** Number of total events added to this event_base */
int event_count;
/** Number of total events active in this event_base */
int event_count_active;
/** Set if we should terminate the loop once we're done processing
* events. */
int event_gotterm;
/** Set if we should terminate the loop immediately */
int event_break;
/** Set if we're running the event_base_loop function, to prevent
* reentrant invocation. */
int running_loop;
/* Active event management. */
/** An array of nactivequeues queues for active events (ones that
* have triggered, and whose callbacks need to be called). Lower
* priority numbers are more important, and their callbacks stall
* those in higher-numbered queues.
*/
struct event_list *activequeues;
/** The length of the activequeues array */
int nactivequeues;
/* common timeout logic */
/** An array of common_timeout_list* for all of the common timeout
* values we know. */
struct common_timeout_list **common_timeout_queues;
/** The number of entries used in common_timeout_queues */
int n_common_timeouts;
/** The total size of common_timeout_queues. */
int n_common_timeouts_allocated;
/** List of deferred_cb that are active. We run these after the active
* events. */
struct deferred_cb_queue defer_queue;
/** Mapping from file descriptors to enabled (added) events */
struct event_io_map io;
/** Mapping from signal numbers to enabled (added) events. */
struct event_signal_map sigmap;
/** Stored timeval; used to detect when time is running backwards. */
struct timeval event_tv;
/** Priority queue of events with timeouts. */
struct min_heap timeheap;
/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
* too often. */
struct timeval tv_cache;
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
/** Difference between internal time (maybe from clock_gettime) and
* gettimeofday. */
struct timeval tv_clock_diff;
/** Second in which we last updated tv_clock_diff, in monotonic time. */
time_t last_updated_clock_diff;
#endif
#ifndef EVENT__DISABLE_THREAD_SUPPORT
/* threading support */
/** The thread currently running the event_loop for this base */
unsigned long th_owner_id;
/** A lock to prevent conflicting accesses to this event_base */
void *th_base_lock;
/** The event whose callback is executing right now */
struct event *current_event;
/** A condition that gets signalled when we're done processing an
* event with waiters on it. */
void *current_event_cond;
/** Number of threads blocking on current_event_cond. */
int current_event_waiters;
#endif
#ifdef _WIN32
/** IOCP support structure, if IOCP is enabled. */
struct event_iocp_port *iocp;
#endif
/** Flags that this base was configured with */
enum event_base_config_flag flags;
struct timeval max_dispatch_time;
int max_dispatch_callbacks;
int limit_callbacks_after_prio;
/* Notification used to wake up the main thread for break, loopexit, etc. */
/** True if the base already has a pending notify, and we don't need
* to add any more. */
int is_notify_pending;
/** A socketpair used by some th_notify functions to wake up the main
* thread. */
evutil_socket_t th_notify_fd[2];
/** An event used by some th_notify functions to wake up the main
* thread. */
struct event th_notify;
/** A function used to wake up the main thread from another thread. */
int (*th_notify_fn)(struct event_base *base);
};
struct event_config_entry {
TAILQ_ENTRY(event_config_entry) next;
const char *avoid_method;
};
/** Internal structure: describes the configuration we want for an event_base
* that we're about to allocate. */
struct event_config {
TAILQ_HEAD(event_configq, event_config_entry) entries;
int n_cpus_hint;
struct timeval max_dispatch_interval;
int max_dispatch_callbacks;
int limit_callbacks_after_prio;
enum event_method_feature require_features;
enum event_base_config_flag flags;
};
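/* These fields are normally filled in through the public event_config_*()
 * API before event_base_new_with_config() consumes them. A brief sketch
 * (error checking omitted):
 *
 *   struct event_config *cfg = event_config_new();
 *   event_config_avoid_method(cfg, "select");           adds to 'entries'
 *   event_config_require_features(cfg, EV_FEATURE_O1);  sets require_features
 *   base = event_base_new_with_config(cfg);
 *   event_config_free(cfg);
 */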
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(EVENT__HAVE_SYS_QUEUE_H) && !defined(EVENT__HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head) ((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head) NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#endif
#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#endif
#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif
#endif /* EVENT__HAVE_SYS_QUEUE_H && !EVENT__HAVE_TAILQFOREACH */
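/* For example, the avoided-method list kept in struct event_config above can
 * be walked with TAILQ_FOREACH (a sketch; 'cfg' and 'name' are assumed to be
 * in scope):
 *
 *   struct event_config_entry *entry;
 *   TAILQ_FOREACH(entry, &cfg->entries, next) {
 *       if (!strcmp(entry->avoid_method, name))
 *           break;
 *   }
 */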
#define N_ACTIVE_CALLBACKS(base) \
((base)->event_count_active + (base)->defer_queue.active_count)
int evsig_set_handler_(struct event_base *base, int evsignal,
void (*fn)(int));
int evsig_restore_handler_(struct event_base *base, int evsignal);
void event_active_nolock_(struct event *ev, int res, short count);
/* FIXME document. */
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);
/** For debugging: unless assertions are disabled, verify the referential
integrity of the internal data structures of 'base'. This operation can
be expensive.
Returns on success; aborts on failure.
*/
void event_base_assert_ok_(struct event_base *base);
/* Callback type for event_base_foreach_event. */
typedef int (*event_base_foreach_event_cb)(struct event_base *base, struct event *, void *);
/* Helper function: Call 'fn' exactly once for every inserted or active event in
* the event_base 'base'.
*
* If fn returns 0, continue on to the next event. Otherwise, return the same
* value that fn returned.
*
* Requires that 'base' be locked.
*/
int event_base_foreach_event_(struct event_base *base,
event_base_foreach_event_cb cb, void *arg);
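/* A hedged sketch of a callback suitable for this helper; 'count_cb' is a
 * hypothetical name, not a libevent symbol:
 *
 *   static int
 *   count_cb(struct event_base *base, struct event *ev, void *arg)
 *   {
 *       int *np = arg;
 *       ++*np;
 *       return 0;    returning nonzero stops the iteration early
 *   }
 *
 *   int n = 0;
 *   event_base_foreach_event_(base, count_cb, &n);
 */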
#ifdef __cplusplus
}
#endif
#endif /* EVENT_INTERNAL_H_INCLUDED_ */