/bitcoin/depends/work/build/x86_64-pc-linux-gnu/libevent/2.1.12-stable-7656baec08e/bufferevent-internal.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson |
3 | | * |
4 | | * Redistribution and use in source and binary forms, with or without |
5 | | * modification, are permitted provided that the following conditions |
6 | | * are met: |
7 | | * 1. Redistributions of source code must retain the above copyright |
8 | | * notice, this list of conditions and the following disclaimer. |
9 | | * 2. Redistributions in binary form must reproduce the above copyright |
10 | | * notice, this list of conditions and the following disclaimer in the |
11 | | * documentation and/or other materials provided with the distribution. |
12 | | * 3. The name of the author may not be used to endorse or promote products |
13 | | * derived from this software without specific prior written permission. |
14 | | * |
15 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | | */ |
26 | | #ifndef BUFFEREVENT_INTERNAL_H_INCLUDED_ |
27 | | #define BUFFEREVENT_INTERNAL_H_INCLUDED_ |
28 | | |
29 | | #ifdef __cplusplus |
30 | | extern "C" { |
31 | | #endif |
32 | | |
33 | | #include "event2/event-config.h" |
34 | | #include "event2/event_struct.h" |
35 | | #include "evconfig-private.h" |
36 | | #include "event2/util.h" |
37 | | #include "defer-internal.h" |
38 | | #include "evthread-internal.h" |
39 | | #include "event2/thread.h" |
40 | | #include "ratelim-internal.h" |
41 | | #include "event2/bufferevent_struct.h" |
42 | | |
43 | | #include "ipv6-internal.h" |
44 | | #ifdef _WIN32 |
45 | | #include <ws2tcpip.h> |
46 | | #endif |
47 | | #ifdef EVENT__HAVE_NETINET_IN_H |
48 | | #include <netinet/in.h> |
49 | | #endif |
50 | | #ifdef EVENT__HAVE_NETINET_IN6_H |
51 | | #include <netinet/in6.h> |
52 | | #endif |
53 | | |
54 | | /* These flags are reasons that we might be declining to actually enable |
55 | | reading or writing on a bufferevent. |
56 | | */ |
57 | | |
58 | | /* On all bufferevents, for reading: used when we have read up to the
59 | | watermark value. |
60 | | |
61 | | On a filtering bufferevent, for writing: used when the underlying |
62 | | bufferevent's write buffer has been filled up to its watermark |
63 | | value. |
64 | | */ |
65 | 0 | #define BEV_SUSPEND_WM 0x01 |
66 | | /* On a base bufferevent: when we have emptied our own bandwidth bucket. */
67 | 0 | #define BEV_SUSPEND_BW 0x02 |
68 | | /* On a base bufferevent: when we have emptied the group's bandwidth bucket. */ |
69 | 0 | #define BEV_SUSPEND_BW_GROUP 0x04 |
70 | | /* On a socket bufferevent: can't do any operations while we're waiting for |
71 | | * name lookup to finish. */ |
72 | 0 | #define BEV_SUSPEND_LOOKUP 0x08 |
73 | | /* On a base bufferevent, for reading: used when a filter has choked this |
74 | | * (underlying) bufferevent because it has stopped reading from it. */ |
75 | 0 | #define BEV_SUSPEND_FILT_READ 0x10 |
76 | | |
77 | | typedef ev_uint16_t bufferevent_suspend_flags; |
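The flags above are combined as bits in the read_suspended / write_suspended fields declared further down, so several independent reasons can hold I/O off at the same time, and I/O may resume only once every reason has been cleared. The following is a minimal sketch of that accounting; the struct and helper names are hypothetical, not libevent's implementation.

/* Hypothetical sketch: independent suspend reasons OR'd into one bitfield;
 * reading can resume only when the last reason is cleared. */
struct toy_bev {
	bufferevent_suspend_flags read_suspended;
};

static void toy_suspend_read(struct toy_bev *b, bufferevent_suspend_flags what)
{
	b->read_suspended |= what;	/* record one more reason not to read */
}

static void toy_unsuspend_read(struct toy_bev *b, bufferevent_suspend_flags what)
{
	b->read_suspended &= ~what;	/* drop that reason */
	if (!b->read_suspended) {
		/* no reasons left: reading could be re-enabled here */
	}
}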
78 | | |
79 | | struct bufferevent_rate_limit_group { |
80 | | /** List of all members in the group */ |
81 | | LIST_HEAD(rlim_group_member_list, bufferevent_private) members; |
82 | | /** Current limits for the group. */ |
83 | | struct ev_token_bucket rate_limit; |
84 | | struct ev_token_bucket_cfg rate_limit_cfg; |
85 | | |
86 | | /** True iff we don't want any member of the group to read until
87 | | * the token bucket refills. */ |
88 | | unsigned read_suspended : 1; |
89 | | /** True iff we don't want any member of the group to write until
90 | | * the token bucket refills. */ |
91 | | unsigned write_suspended : 1; |
92 | | /** True iff we were unable to unsuspend one of the bufferevents in the
93 | | * group for reading the last time we tried, and we should try |
94 | | * again. */ |
95 | | unsigned pending_unsuspend_read : 1; |
96 | | /** True iff we were unable to unsuspend one of the bufferevents in the
97 | | * group for writing the last time we tried, and we should try |
98 | | * again. */ |
99 | | unsigned pending_unsuspend_write : 1; |
100 | | |
101 | | /*@{*/ |
102 | | /** Total number of bytes read or written in this group since last |
103 | | * reset. */ |
104 | | ev_uint64_t total_read; |
105 | | ev_uint64_t total_written; |
106 | | /*@}*/ |
107 | | |
108 | | /** The number of bufferevents in the group. */ |
109 | | int n_members; |
110 | | |
111 | | /** The smallest number of bytes that any member of the group should |
112 | | * be limited to read or write at a time. */ |
113 | | ev_ssize_t min_share; |
114 | | ev_ssize_t configured_min_share; |
115 | | |
116 | | /** Timeout event that goes off once a tick, when the bucket is ready |
117 | | * to refill. */ |
118 | | struct event master_refill_event; |
119 | | |
120 | | /** Seed for weak random number generator. Protected by 'lock' */ |
121 | | struct evutil_weakrand_state weakrand_seed; |
122 | | |
123 | | /** Lock to protect the members of this group. This lock should nest |
124 | | * within every bufferevent lock: if you are holding this lock, do |
125 | | * not assume you can lock another bufferevent. */ |
126 | | void *lock; |
127 | | }; |
128 | | |
129 | | /** Fields for rate-limiting a single bufferevent. */ |
130 | | struct bufferevent_rate_limit { |
131 | | /* Linked-list elements for storing this bufferevent_private in a |
132 | | * group. |
133 | | * |
134 | | * Note that this field is supposed to be protected by the group |
135 | | * lock */ |
136 | | LIST_ENTRY(bufferevent_private) next_in_group; |
137 | | /** The rate-limiting group for this bufferevent, or NULL if it is |
138 | | * only rate-limited on its own. */ |
139 | | struct bufferevent_rate_limit_group *group; |
140 | | |
141 | | /* This bufferevent's current limits. */ |
142 | | struct ev_token_bucket limit; |
143 | | /* Pointer to the rate-limit configuration for this bufferevent. |
144 | | * Can be shared. XXX reference-count this? */ |
145 | | struct ev_token_bucket_cfg *cfg; |
146 | | |
147 | | /* Timeout event used when one of this bufferevent's buckets is
148 | | * empty. */ |
149 | | struct event refill_bucket_event; |
150 | | }; |
151 | | |
152 | | /** Parts of the bufferevent structure that are shared among all bufferevent |
153 | | * types, but not exposed in bufferevent_struct.h. */ |
154 | | struct bufferevent_private { |
155 | | /** The underlying bufferevent structure. */ |
156 | | struct bufferevent bev; |
157 | | |
158 | | /** Evbuffer callback to enforce watermarks on input. */ |
159 | | struct evbuffer_cb_entry *read_watermarks_cb; |
160 | | |
161 | | /** If set, we should free the lock when we free the bufferevent. */ |
162 | | unsigned own_lock : 1; |
163 | | |
164 | | /** Flag: set if we have deferred callbacks and a read callback is |
165 | | * pending. */ |
166 | | unsigned readcb_pending : 1; |
167 | | /** Flag: set if we have deferred callbacks and a write callback is |
168 | | * pending. */ |
169 | | unsigned writecb_pending : 1; |
170 | | /** Flag: set if we are currently busy connecting. */ |
171 | | unsigned connecting : 1; |
172 | | /** Flag: set if a connect failed prematurely; this is a hack for |
173 | | * getting around the bufferevent abstraction. */ |
174 | | unsigned connection_refused : 1; |
175 | | /** Set to the events pending if we have deferred callbacks and |
176 | | * an events callback is pending. */ |
177 | | short eventcb_pending; |
178 | | |
179 | | /** If set, read is suspended until one or more conditions are over. |
180 | | * The actual value here is a bitfield of those conditions; see the |
181 | | * BEV_SUSPEND_* flags above. */ |
182 | | bufferevent_suspend_flags read_suspended; |
183 | | |
184 | | /** If set, writing is suspended until one or more conditions are over. |
185 | | * The actual value here is a bitfield of those conditions; see the |
186 | | * BEV_SUSPEND_* flags above. */ |
187 | | bufferevent_suspend_flags write_suspended; |
188 | | |
189 | | /** Set to the current socket errno if we have deferred callbacks and |
190 | | * an events callback is pending. */ |
191 | | int errno_pending; |
192 | | |
193 | | /** The DNS error code for bufferevent_socket_connect_hostname */ |
194 | | int dns_error; |
195 | | |
196 | | /** Used to implement deferred callbacks */ |
197 | | struct event_callback deferred; |
198 | | |
199 | | /** The options this bufferevent was constructed with */ |
200 | | enum bufferevent_options options; |
201 | | |
202 | | /** Current reference count for this bufferevent. */ |
203 | | int refcnt; |
204 | | |
205 | | /** Lock for this bufferevent. Shared by the inbuf and the outbuf. |
206 | | * If NULL, locking is disabled. */ |
207 | | void *lock; |
208 | | |
209 | | /** No matter how big our bucket gets, don't try to read more than this |
210 | | * much in a single read operation. */ |
211 | | ev_ssize_t max_single_read; |
212 | | |
213 | | /** No matter how big our bucket gets, don't try to write more than this |
214 | | * much in a single write operation. */ |
215 | | ev_ssize_t max_single_write; |
216 | | |
217 | | /** Rate-limiting information for this bufferevent */ |
218 | | struct bufferevent_rate_limit *rate_limiting; |
219 | | |
220 | | /* Saved conn_addr, so we can extract the IP address from it later.
221 | | *
222 | | * Some servers may reset/close the connection without waiting for the
223 | | * client, in which case we cannot extract the IP address even in close_cb.
224 | | * So we save it right after connecting to the remote server, or right
225 | | * after resolving (to avoid extra DNS requests while retrying, since UDP
226 | | * is slow). */
227 | | union { |
228 | | struct sockaddr_in6 in6; |
229 | | struct sockaddr_in in; |
230 | | } conn_address; |
231 | | |
232 | | struct evdns_getaddrinfo_request *dns_request; |
233 | | }; |
234 | | |
235 | | /** Possible operations for a control callback. */ |
236 | | enum bufferevent_ctrl_op { |
237 | | BEV_CTRL_SET_FD, |
238 | | BEV_CTRL_GET_FD, |
239 | | BEV_CTRL_GET_UNDERLYING, |
240 | | BEV_CTRL_CANCEL_ALL |
241 | | }; |
242 | | |
243 | | /** Possible data types for a control callback */ |
244 | | union bufferevent_ctrl_data { |
245 | | void *ptr; |
246 | | evutil_socket_t fd; |
247 | | }; |
248 | | |
249 | | /** |
250 | | Implementation table for a bufferevent: holds function pointers and other |
251 | | information to make the various bufferevent types work. |
252 | | */ |
253 | | struct bufferevent_ops { |
254 | | /** The name of the bufferevent's type. */ |
255 | | const char *type; |
256 | | /** At what offset into the implementation type will we find a |
257 | | bufferevent structure? |
258 | | |
259 | | Example: if the type is implemented as |
260 | | struct bufferevent_x { |
261 | | int extra_data; |
262 | | struct bufferevent bev; |
263 | | } |
264 | | then mem_offset should be offsetof(struct bufferevent_x, bev) |
265 | | */ |
266 | | off_t mem_offset; |
267 | | |
268 | | /** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does |
269 | | not need to adjust the 'enabled' field. Returns 0 on success, -1 |
270 | | on failure. |
271 | | */ |
272 | | int (*enable)(struct bufferevent *, short); |
273 | | |
274 | | /** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does |
275 | | not need to adjust the 'enabled' field. Returns 0 on success, -1 |
276 | | on failure. |
277 | | */ |
278 | | int (*disable)(struct bufferevent *, short); |
279 | | |
280 | | /** Detaches the bufferevent from related data structures. Called as
281 | | * soon as its reference count reaches 0. */ |
282 | | void (*unlink)(struct bufferevent *); |
283 | | |
284 | | /** Free any storage and deallocate any extra data or structures used |
285 | | in this implementation. Called when the bufferevent is |
286 | | finalized. |
287 | | */ |
288 | | void (*destruct)(struct bufferevent *); |
289 | | |
290 | | /** Called when the timeouts on the bufferevent have changed. */
291 | | int (*adj_timeouts)(struct bufferevent *); |
292 | | |
293 | | /** Called to flush data. */ |
294 | | int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode); |
295 | | |
296 | | /** Called to access miscellaneous fields. */ |
297 | | int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *); |
298 | | |
299 | | }; |
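To make the table above concrete: a backend wraps a struct bufferevent inside its own type, fills a bufferevent_ops with its callbacks, and records the wrapper's offset in mem_offset so generic code can convert between the two pointers. The sketch below is purely illustrative; bufferevent_example and the be_example_* names are hypothetical, and offsetof() comes from <stddef.h>.

/* Hypothetical backend, for illustration only. */
struct bufferevent_example {
	struct bufferevent bev;		/* shared part; its offset goes in mem_offset */
	int example_state;		/* backend-specific data */
};

static int be_example_enable(struct bufferevent *bufev, short events)
{
	/* recover the wrapper from the generic pointer via the recorded offset */
	struct bufferevent_example *ex = (struct bufferevent_example *)
	    ((char *)bufev - offsetof(struct bufferevent_example, bev));
	(void)ex; (void)events;
	return 0;
}

static int be_example_disable(struct bufferevent *bufev, short events)
{
	(void)bufev; (void)events;
	return 0;
}

const struct bufferevent_ops bufferevent_ops_example = {
	"example",
	offsetof(struct bufferevent_example, bev),
	be_example_enable,
	be_example_disable,
	NULL,	/* unlink */
	NULL,	/* destruct */
	NULL,	/* adj_timeouts */
	NULL,	/* flush */
	NULL,	/* ctrl */
};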
300 | | |
301 | | extern const struct bufferevent_ops bufferevent_ops_socket; |
302 | | extern const struct bufferevent_ops bufferevent_ops_filter; |
303 | | extern const struct bufferevent_ops bufferevent_ops_pair; |
304 | | |
305 | 0 | #define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket) |
306 | 0 | #define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter) |
307 | 0 | #define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair) |
308 | | |
309 | | #if defined(EVENT__HAVE_OPENSSL) |
310 | | extern const struct bufferevent_ops bufferevent_ops_openssl; |
311 | | #define BEV_IS_OPENSSL(bevp) ((bevp)->be_ops == &bufferevent_ops_openssl) |
312 | | #else |
313 | | #define BEV_IS_OPENSSL(bevp) 0 |
314 | | #endif |
315 | | |
316 | | #ifdef _WIN32 |
317 | | extern const struct bufferevent_ops bufferevent_ops_async; |
318 | | #define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async) |
319 | | #else |
320 | 0 | #define BEV_IS_ASYNC(bevp) 0 |
321 | | #endif |
322 | | |
323 | | /** Initialize the shared parts of a bufferevent. */ |
324 | | EVENT2_EXPORT_SYMBOL |
325 | | int bufferevent_init_common_(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options); |
326 | | |
327 | | /** For internal use: temporarily stop all reads on bufev, until the conditions |
328 | | * in 'what' are over. */ |
329 | | EVENT2_EXPORT_SYMBOL |
330 | | void bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
331 | | /** For internal use: clear the conditions 'what' on bufev, and re-enable |
332 | | * reading if there are no conditions left. */ |
333 | | EVENT2_EXPORT_SYMBOL |
334 | | void bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
335 | | |
336 | | /** For internal use: temporarily stop all writes on bufev, until the conditions |
337 | | * in 'what' are over. */ |
338 | | void bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
339 | | /** For internal use: clear the conditions 'what' on bufev, and re-enable |
340 | | * writing if there are no conditions left. */ |
341 | | void bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
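As a usage illustration (a sketch, not libevent's actual DNS code): a socket-style backend waiting on a hostname lookup can park I/O with BEV_SUSPEND_LOOKUP and release only that reason when the lookup finishes; the example_* function names are hypothetical.

/* Illustrative only: suspend I/O for the duration of a name lookup. */
static void example_lookup_started(struct bufferevent *bufev)
{
	/* nothing useful can happen on the socket until the name resolves */
	bufferevent_suspend_read_(bufev, BEV_SUSPEND_LOOKUP);
	bufferevent_suspend_write_(bufev, BEV_SUSPEND_LOOKUP);
}

static void example_lookup_finished(struct bufferevent *bufev)
{
	/* clears only the LOOKUP reason; I/O resumes once no reasons remain */
	bufferevent_unsuspend_read_(bufev, BEV_SUSPEND_LOOKUP);
	bufferevent_unsuspend_write_(bufev, BEV_SUSPEND_LOOKUP);
}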
342 | | |
343 | | #define bufferevent_wm_suspend_read(b) \ |
344 | 0 | bufferevent_suspend_read_((b), BEV_SUSPEND_WM) |
345 | | #define bufferevent_wm_unsuspend_read(b) \ |
346 | 0 | bufferevent_unsuspend_read_((b), BEV_SUSPEND_WM) |
347 | | |
348 | | /* |
349 | | Disable a bufferevent. Equivalent to bufferevent_disable(), but |
350 | | first resets the 'connecting' flag so that EV_WRITE is reliably disabled.
351 | | |
352 | | XXXX this method will go away in the future; try not to add new users. |
353 | | See comment in evhttp_connection_reset_() for discussion. |
354 | | |
355 | | @param bufev the bufferevent to be disabled |
356 | | @param event any combination of EV_READ | EV_WRITE. |
357 | | @return 0 if successful, or -1 if an error occurred |
358 | | @see bufferevent_disable() |
359 | | */ |
360 | | EVENT2_EXPORT_SYMBOL |
361 | | int bufferevent_disable_hard_(struct bufferevent *bufev, short event); |
362 | | |
363 | | /** Internal: Set up locking on a bufferevent. If lock is set, use it. |
364 | | * Otherwise, use a new lock. */ |
365 | | EVENT2_EXPORT_SYMBOL |
366 | | int bufferevent_enable_locking_(struct bufferevent *bufev, void *lock); |
367 | | /** Internal: backwards-compat macro for the now-public function that
368 | | * increments the reference count on bufev. */
369 | 0 | #define bufferevent_incref_(bufev) bufferevent_incref(bufev) |
370 | | /** Internal: Lock bufev and increase its reference count. Pair with
371 | | * bufferevent_decref_and_unlock_() to release both. */
372 | | EVENT2_EXPORT_SYMBOL |
373 | | void bufferevent_incref_and_lock_(struct bufferevent *bufev); |
374 | | /** Internal: backwards-compat macro for the now-public function that
375 | | * decrements the reference count on bufev. Returns 1 if it freed
376 | | * the bufferevent. */
377 | 0 | #define bufferevent_decref_(bufev) bufferevent_decref(bufev) |
378 | | |
379 | | /** Internal: Drop the reference count on bufev, freeing as necessary, and |
380 | | * unlocking it otherwise. Returns 1 if it freed the bufferevent. */ |
381 | | EVENT2_EXPORT_SYMBOL |
382 | | int bufferevent_decref_and_unlock_(struct bufferevent *bufev); |
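A sketch of the reference/lock discipline these helpers suggest: hold a reference and the lock across work that may invoke user callbacks (which could otherwise free the bufferevent), then release both together. The example_* name is hypothetical.

/* Illustrative only: incref-and-lock paired with decref-and-unlock. */
static void example_deferred_work(struct bufferevent *bufev)
{
	bufferevent_incref_and_lock_(bufev);	/* hold a ref plus the lock */

	/* ... callbacks that might drop other references could run here ... */

	/* releases the lock, drops our ref, and frees bufev if it was the last */
	bufferevent_decref_and_unlock_(bufev);
}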
383 | | |
384 | | /** Internal: If callbacks are deferred and we have a read callback, schedule |
385 | | * a readcb. Otherwise just run the readcb. Ignores watermarks. */ |
386 | | EVENT2_EXPORT_SYMBOL |
387 | | void bufferevent_run_readcb_(struct bufferevent *bufev, int options); |
388 | | /** Internal: If callbacks are deferred and we have a write callback, schedule |
389 | | * a writecb. Otherwise just run the writecb. Ignores watermarks. */ |
390 | | EVENT2_EXPORT_SYMBOL |
391 | | void bufferevent_run_writecb_(struct bufferevent *bufev, int options); |
392 | | /** Internal: If callbacks are deferred and we have an eventcb, schedule |
393 | | * it to run with events "what". Otherwise just run the eventcb. |
394 | | * See bufferevent_trigger_event for meaning of "options". */ |
395 | | EVENT2_EXPORT_SYMBOL |
396 | | void bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options); |
397 | | |
398 | | /** Internal: Run or schedule (if deferred or options contain |
399 | | * BEV_TRIG_DEFER_CALLBACKS) I/O callbacks specified in iotype. |
400 | | * Must already hold the bufev lock. Honors watermarks unless |
401 | | * BEV_TRIG_IGNORE_WATERMARKS is in options. */ |
402 | | static inline void bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options); |
403 | | |
404 | | /* Making this inline since all of the common-case calls to this function in |
405 | | * libevent use constant arguments. */ |
406 | | static inline void |
407 | | bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options) |
408 | 4.71M | { |
409 | 4.71M | 	if ((iotype & EV_READ) && ((options & BEV_TRIG_IGNORE_WATERMARKS) ||
410 | 2.35M | 	    evbuffer_get_length(bufev->input) >= bufev->wm_read.low))
411 | 2.35M | 		bufferevent_run_readcb_(bufev, options);
412 | 4.71M | 	if ((iotype & EV_WRITE) && ((options & BEV_TRIG_IGNORE_WATERMARKS) ||
413 | 2.35M | 	    evbuffer_get_length(bufev->output) <= bufev->wm_write.low))
414 | 2.35M | 		bufferevent_run_writecb_(bufev, options);
415 | 4.71M | }
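Because bufferevent_trigger_nolock_() assumes the caller already holds the bufferevent lock, callers outside the locked paths would wrap it roughly as below (an illustrative sketch, not the definition of the public trigger API; the example_* name is hypothetical).

/* Illustrative only: take the lock, then use the _nolock_ variant. */
static void example_trigger_read(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	bufferevent_trigger_nolock_(bufev, EV_READ, 0);
	BEV_UNLOCK(bufev);
}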
416 | | |
417 | | /** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in |
418 | | * which case add ev with no timeout. */ |
419 | | EVENT2_EXPORT_SYMBOL |
420 | | int bufferevent_add_event_(struct event *ev, const struct timeval *tv); |
421 | | |
422 | | /* ========= |
423 | | * These next functions implement timeouts for bufferevents that aren't
424 | | * using ev_read and ev_write for anything else.
425 | | * ========= */ |
426 | | /** Internal use: Set up the ev_read and ev_write callbacks so that |
427 | | * the other "generic_timeout" functions will work on it. Call this from |
428 | | * the constructor function. */ |
429 | | EVENT2_EXPORT_SYMBOL |
430 | | void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev); |
431 | | /** Internal use: Add or delete the generic timeout events as appropriate. |
432 | | * (If an event is enabled and a timeout is set, we add the event. Otherwise |
433 | | * we delete it.) Call this from anything that changes the timeout values, |
434 | | * that enables EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
435 | | EVENT2_EXPORT_SYMBOL |
436 | | int bufferevent_generic_adj_timeouts_(struct bufferevent *bev); |
437 | | EVENT2_EXPORT_SYMBOL |
438 | | int bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev); |
439 | | |
440 | | EVENT2_EXPORT_SYMBOL |
441 | | enum bufferevent_options bufferevent_get_options_(struct bufferevent *bev); |
442 | | |
443 | | EVENT2_EXPORT_SYMBOL |
444 | | const struct sockaddr* |
445 | | bufferevent_socket_get_conn_address_(struct bufferevent *bev); |
446 | | |
447 | | EVENT2_EXPORT_SYMBOL |
448 | | void |
449 | | bufferevent_socket_set_conn_address_fd_(struct bufferevent *bev, evutil_socket_t fd); |
450 | | |
451 | | EVENT2_EXPORT_SYMBOL |
452 | | void |
453 | | bufferevent_socket_set_conn_address_(struct bufferevent *bev, struct sockaddr *addr, size_t addrlen); |
454 | | |
455 | | |
456 | | /** Internal use: We have just successfully read data into an inbuf, so |
457 | | * reset the read timeout (if any). */ |
458 | | #define BEV_RESET_GENERIC_READ_TIMEOUT(bev) \ |
459 | 0 | do { \ |
460 | 0 | if (evutil_timerisset(&(bev)->timeout_read)) \ |
461 | 0 | event_add(&(bev)->ev_read, &(bev)->timeout_read); \ |
462 | 0 | } while (0) |
463 | | /** Internal use: We have just successfully written data from an outbuf, so
464 | | * reset the write timeout (if any). */
465 | | #define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev) \ |
466 | 0 | do { \ |
467 | 0 | if (evutil_timerisset(&(bev)->timeout_write)) \ |
468 | 0 | event_add(&(bev)->ev_write, &(bev)->timeout_write); \ |
469 | 0 | } while (0) |
470 | | #define BEV_DEL_GENERIC_READ_TIMEOUT(bev) \ |
471 | 0 | event_del(&(bev)->ev_read) |
472 | | #define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev) \ |
473 | 0 | event_del(&(bev)->ev_write) |
474 | | |
475 | | |
476 | | /** Internal: Given a bufferevent, return its corresponding |
477 | | * bufferevent_private. */ |
478 | 242M | #define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev) |
479 | | |
480 | | #ifdef EVENT__DISABLE_THREAD_SUPPORT |
481 | | #define BEV_LOCK(b) EVUTIL_NIL_STMT_ |
482 | | #define BEV_UNLOCK(b) EVUTIL_NIL_STMT_ |
483 | | #else |
484 | | /** Internal: Grab the lock (if any) on a bufferevent */ |
485 | 65.9M | #define BEV_LOCK(b) do { \ |
486 | 65.9M | struct bufferevent_private *locking = BEV_UPCAST(b); \ |
487 | 65.9M | EVLOCK_LOCK(locking->lock, 0); \ |
488 | 65.9M | } while (0) |
489 | | |
490 | | /** Internal: Release the lock (if any) on a bufferevent */ |
491 | 65.9M | #define BEV_UNLOCK(b) do { \ |
492 | 65.9M | struct bufferevent_private *locking = BEV_UPCAST(b); \ |
493 | 65.9M | EVLOCK_UNLOCK(locking->lock, 0); \ |
494 | 65.9M | } while (0) |
495 | | #endif |
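To make the upcast/locking pair concrete, here is a sketch (the example_* name is hypothetical) of how internal code reaches a private field: convert the public struct bufferevent pointer to its enclosing bufferevent_private with BEV_UPCAST, under the bufferevent lock.

/* Illustrative only: read a bufferevent_private field under the lock. */
static int example_get_refcnt(struct bufferevent *bufev)
{
	int refcnt;
	BEV_LOCK(bufev);
	refcnt = BEV_UPCAST(bufev)->refcnt;
	BEV_UNLOCK(bufev);
	return refcnt;
}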
496 | | |
497 | | |
498 | | /* ==== For rate-limiting. */ |
499 | | |
500 | | EVENT2_EXPORT_SYMBOL |
501 | | int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, |
502 | | ev_ssize_t bytes); |
503 | | EVENT2_EXPORT_SYMBOL |
504 | | int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, |
505 | | ev_ssize_t bytes); |
506 | | EVENT2_EXPORT_SYMBOL |
507 | | ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev); |
508 | | EVENT2_EXPORT_SYMBOL |
509 | | ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev); |
510 | | |
511 | | int bufferevent_ratelim_init_(struct bufferevent_private *bev); |
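A sketch of how a backend could use these helpers: ask for the current read budget before reading, then charge the bytes actually consumed against the buckets. example_read_some() is a hypothetical stand-in for the backend's own read routine.

/* Illustrative only: budgeted read using the rate-limit helpers above.
 * example_read_some() is a hypothetical backend read routine. */
extern ev_ssize_t example_read_some(struct bufferevent_private *bev,
    ev_ssize_t at_most);

static void example_rate_limited_read(struct bufferevent_private *bev)
{
	ev_ssize_t budget, n;

	budget = bufferevent_get_read_max_(bev);
	if (budget <= 0)
		return;		/* bucket is empty: wait for a refill */

	n = example_read_some(bev, budget);
	if (n > 0)
		bufferevent_decrement_read_buckets_(bev, n);
}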
512 | | |
513 | | #ifdef __cplusplus |
514 | | } |
515 | | #endif |
516 | | |
517 | | |
518 | | #endif /* BUFFEREVENT_INTERNAL_H_INCLUDED_ */ |