/bitcoin/depends/work/build/x86_64-pc-linux-gnu/libevent/2.1.12-stable-7656baec08e/buffer.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu> |
3 | | * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson |
4 | | * |
5 | | * Redistribution and use in source and binary forms, with or without |
6 | | * modification, are permitted provided that the following conditions |
7 | | * are met: |
8 | | * 1. Redistributions of source code must retain the above copyright |
9 | | * notice, this list of conditions and the following disclaimer. |
10 | | * 2. Redistributions in binary form must reproduce the above copyright |
11 | | * notice, this list of conditions and the following disclaimer in the |
12 | | * documentation and/or other materials provided with the distribution. |
13 | | * 3. The name of the author may not be used to endorse or promote products |
14 | | * derived from this software without specific prior written permission. |
15 | | * |
16 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | | */ |
27 | | |
28 | | #include "event2/event-config.h" |
29 | | #include "evconfig-private.h" |
30 | | |
31 | | #ifdef _WIN32 |
32 | | #include <winsock2.h> |
33 | | #include <windows.h> |
34 | | #include <io.h> |
35 | | #endif |
36 | | |
37 | | #ifdef EVENT__HAVE_VASPRINTF |
38 | | /* If we have vasprintf, we need to define _GNU_SOURCE before we include |
39 | | * stdio.h. This comes from evconfig-private.h. |
40 | | */ |
41 | | #endif |
42 | | |
43 | | #include <sys/types.h> |
44 | | |
45 | | #ifdef EVENT__HAVE_SYS_TIME_H |
46 | | #include <sys/time.h> |
47 | | #endif |
48 | | |
49 | | #ifdef EVENT__HAVE_SYS_SOCKET_H |
50 | | #include <sys/socket.h> |
51 | | #endif |
52 | | |
53 | | #ifdef EVENT__HAVE_SYS_UIO_H |
54 | | #include <sys/uio.h> |
55 | | #endif |
56 | | |
57 | | #ifdef EVENT__HAVE_SYS_IOCTL_H |
58 | | #include <sys/ioctl.h> |
59 | | #endif |
60 | | |
61 | | #ifdef EVENT__HAVE_SYS_MMAN_H |
62 | | #include <sys/mman.h> |
63 | | #endif |
64 | | |
65 | | #ifdef EVENT__HAVE_SYS_SENDFILE_H |
66 | | #include <sys/sendfile.h> |
67 | | #endif |
68 | | #ifdef EVENT__HAVE_SYS_STAT_H |
69 | | #include <sys/stat.h> |
70 | | #endif |
71 | | |
72 | | |
73 | | #include <errno.h> |
74 | | #include <stdio.h> |
75 | | #include <stdlib.h> |
76 | | #include <string.h> |
77 | | #ifdef EVENT__HAVE_STDARG_H |
78 | | #include <stdarg.h> |
79 | | #endif |
80 | | #ifdef EVENT__HAVE_UNISTD_H |
81 | | #include <unistd.h> |
82 | | #endif |
83 | | #include <limits.h> |
84 | | |
85 | | #include "event2/event.h" |
86 | | #include "event2/buffer.h" |
87 | | #include "event2/buffer_compat.h" |
88 | | #include "event2/bufferevent.h" |
89 | | #include "event2/bufferevent_compat.h" |
90 | | #include "event2/bufferevent_struct.h" |
91 | | #include "event2/thread.h" |
92 | | #include "log-internal.h" |
93 | | #include "mm-internal.h" |
94 | | #include "util-internal.h" |
95 | | #include "evthread-internal.h" |
96 | | #include "evbuffer-internal.h" |
97 | | #include "bufferevent-internal.h" |
98 | | #include "event-internal.h" |
99 | | |
100 | | /* some systems do not have MAP_FAILED */ |
101 | | #ifndef MAP_FAILED |
102 | | #define MAP_FAILED ((void *)-1) |
103 | | #endif |
104 | | |
105 | | /* send file support */ |
106 | | #if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__) |
107 | | #define USE_SENDFILE 1 |
108 | | #define SENDFILE_IS_LINUX 1 |
109 | | #elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__) |
110 | | #define USE_SENDFILE 1 |
111 | | #define SENDFILE_IS_FREEBSD 1 |
112 | | #elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__) |
113 | | #define USE_SENDFILE 1 |
114 | | #define SENDFILE_IS_MACOSX 1 |
115 | | #elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__) |
116 | | #define USE_SENDFILE 1 |
117 | | #define SENDFILE_IS_SOLARIS 1 |
118 | | #endif |
119 | | |
120 | | /* Mask of user-selectable callback flags. */ |
121 | | #define EVBUFFER_CB_USER_FLAGS 0xffff |
122 | | /* Mask of all internal-use-only flags. */ |
123 | 0 | #define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000 |
124 | | |
125 | | /* Flag set if the callback is using the cb_obsolete function pointer */ |
126 | 16.5M | #define EVBUFFER_CB_OBSOLETE 0x00040000 |
127 | | |
128 | | /* evbuffer_chain support */ |
129 | 14.1M | #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off) |
130 | 35.3M | #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \ |
131 | 35.3M | 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off)) |
132 | | |
133 | 9.42M | #define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0) |
134 | 9.43M | #define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0) |
135 | | |
136 | | /* evbuffer_ptr support */ |
137 | 0 | #define PTR_NOT_FOUND(ptr) do { \ |
138 | 0 | (ptr)->pos = -1; \ |
139 | 0 | (ptr)->internal_.chain = NULL; \ |
140 | 0 | (ptr)->internal_.pos_in_chain = 0; \ |
141 | 0 | } while (0) |
142 | | |
143 | | static void evbuffer_chain_align(struct evbuffer_chain *chain); |
144 | | static int evbuffer_chain_should_realign(struct evbuffer_chain *chain, |
145 | | size_t datalen); |
146 | | static void evbuffer_deferred_callback(struct event_callback *cb, void *arg); |
147 | | static int evbuffer_ptr_memcmp(const struct evbuffer *buf, |
148 | | const struct evbuffer_ptr *pos, const char *mem, size_t len); |
149 | | static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf, |
150 | | size_t datlen); |
151 | | static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, |
152 | | size_t howfar); |
153 | | static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg); |
154 | | static inline void evbuffer_chain_incref(struct evbuffer_chain *chain); |
155 | | |
156 | | static struct evbuffer_chain * |
157 | | evbuffer_chain_new(size_t size) |
158 | 9.42M | { |
159 | 9.42M | struct evbuffer_chain *chain; |
160 | 9.42M | size_t to_alloc; |
161 | | |
162 | 9.42M | if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE) Branch (162:6): [True: 0, False: 9.42M]
|
163 | 0 | return (NULL); |
164 | | |
165 | 9.42M | size += EVBUFFER_CHAIN_SIZE; |
166 | | |
167 | | /* get the next largest memory that can hold the buffer */ |
168 | 9.42M | if (size < EVBUFFER_CHAIN_MAX / 2) { Branch (168:6): [True: 9.42M, False: 0]
|
169 | 9.42M | to_alloc = MIN_BUFFER_SIZE; |
170 | 16.4M | while (to_alloc < size) { Branch (170:10): [True: 7.04M, False: 9.42M]
|
171 | 7.04M | to_alloc <<= 1; |
172 | 7.04M | } |
173 | 9.42M | } else { |
174 | 0 | to_alloc = size; |
175 | 0 | } |
176 | | |
177 | | /* we get everything in one chunk */ |
178 | 9.42M | if ((chain = mm_malloc(to_alloc)) == NULL) Branch (178:6): [True: 0, False: 9.42M]
|
179 | 0 | return (NULL); |
180 | | |
181 | 9.42M | memset(chain, 0, EVBUFFER_CHAIN_SIZE); |
182 | | |
183 | 9.42M | chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE; |
184 | | |
185 | | /* this way we can manipulate the buffer to different addresses, |
186 | | * which is required for mmap for example. |
187 | | */ |
188 | 9.42M | chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain); |
189 | | |
190 | 9.42M | chain->refcnt = 1; |
191 | | |
192 | 9.42M | return (chain); |
193 | 9.42M | } |
194 | | |
195 | | static inline void |
196 | | evbuffer_chain_free(struct evbuffer_chain *chain) |
197 | 9.42M | { |
198 | 9.42M | EVUTIL_ASSERT(chain->refcnt > 0); |
199 | 9.42M | if (--chain->refcnt > 0) { Branch (199:6): [True: 0, False: 9.42M]
|
200 | | /* chain is still referenced by other chains */ |
201 | 0 | return; |
202 | 0 | } |
203 | | |
204 | 9.42M | if (CHAIN_PINNED(chain)) { |
205 | | /* will get freed once no longer dangling */ |
206 | 0 | chain->refcnt++; |
207 | 0 | chain->flags |= EVBUFFER_DANGLING; |
208 | 0 | return; |
209 | 0 | } |
210 | | |
211 | | /* safe to release chain, it's either a referencing |
212 | | * chain or all references to it have been freed */ |
213 | 9.42M | if (chain->flags & EVBUFFER_REFERENCE) { Branch (213:6): [True: 0, False: 9.42M]
|
214 | 0 | struct evbuffer_chain_reference *info = |
215 | 0 | EVBUFFER_CHAIN_EXTRA( |
216 | 0 | struct evbuffer_chain_reference, |
217 | 0 | chain); |
218 | 0 | if (info->cleanupfn) Branch (218:7): [True: 0, False: 0]
|
219 | 0 | (*info->cleanupfn)(chain->buffer, |
220 | 0 | chain->buffer_len, |
221 | 0 | info->extra); |
222 | 0 | } |
223 | 9.42M | if (chain->flags & EVBUFFER_FILESEGMENT) { Branch (223:6): [True: 0, False: 9.42M]
|
224 | 0 | struct evbuffer_chain_file_segment *info = |
225 | 0 | EVBUFFER_CHAIN_EXTRA( |
226 | 0 | struct evbuffer_chain_file_segment, |
227 | 0 | chain); |
228 | 0 | if (info->segment) { Branch (228:7): [True: 0, False: 0]
|
229 | | #ifdef _WIN32 |
230 | | if (info->segment->is_mapping) |
231 | | UnmapViewOfFile(chain->buffer); |
232 | | #endif |
233 | 0 | evbuffer_file_segment_free(info->segment); |
234 | 0 | } |
235 | 0 | } |
236 | 9.42M | if (chain->flags & EVBUFFER_MULTICAST) { Branch (236:6): [True: 0, False: 9.42M]
|
237 | 0 | struct evbuffer_multicast_parent *info = |
238 | 0 | EVBUFFER_CHAIN_EXTRA( |
239 | 0 | struct evbuffer_multicast_parent, |
240 | 0 | chain); |
241 | | /* referencing chain is being freed, decrease |
242 | | * refcounts of source chain and associated |
243 | | * evbuffer (which get freed once both reach |
244 | | * zero) */ |
245 | 0 | EVUTIL_ASSERT(info->source != NULL); |
246 | 0 | EVUTIL_ASSERT(info->parent != NULL); |
247 | 0 | EVBUFFER_LOCK(info->source); |
248 | 0 | evbuffer_chain_free(info->parent); |
249 | 0 | evbuffer_decref_and_unlock_(info->source); |
250 | 0 | } |
251 | | |
252 | 9.42M | mm_free(chain); |
253 | 9.42M | } |
254 | | |
255 | | static void |
256 | | evbuffer_free_all_chains(struct evbuffer_chain *chain) |
257 | 2.35M | { |
258 | 2.35M | struct evbuffer_chain *next; |
259 | 2.35M | for (; chain; chain = next) { Branch (259:9): [True: 0, False: 2.35M]
|
260 | 0 | next = chain->next; |
261 | 0 | evbuffer_chain_free(chain); |
262 | 0 | } |
263 | 2.35M | } |
264 | | |
265 | | #ifndef NDEBUG |
266 | | static int |
267 | | evbuffer_chains_all_empty(struct evbuffer_chain *chain) |
268 | 0 | { |
269 | 0 | for (; chain; chain = chain->next) { Branch (269:9): [True: 0, False: 0]
|
270 | 0 | if (chain->off) Branch (270:7): [True: 0, False: 0]
|
271 | 0 | return 0; |
272 | 0 | } |
273 | 0 | return 1; |
274 | 0 | } |
275 | | #else |
276 | | /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid |
277 | | "unused variable" warnings. */ |
278 | | static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) { |
279 | | return 1; |
280 | | } |
281 | | #endif |
282 | | |
283 | | /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior |
284 | | * to replacing them all with a new chain. Return a pointer to the place |
285 | | * where the new chain will go. |
286 | | * |
287 | | * Internal; requires lock. The caller must fix up buf->last and buf->first |
288 | | * as needed; they might have been freed. |
289 | | */ |
290 | | static struct evbuffer_chain ** |
291 | | evbuffer_free_trailing_empty_chains(struct evbuffer *buf) |
292 | 2.35M | { |
293 | 2.35M | struct evbuffer_chain **ch = buf->last_with_datap; |
294 | | /* Find the first victim chain. It might be *last_with_datap */ |
295 | 4.71M | while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch))) Branch (295:9): [True: 2.35M, False: 2.35M]
Branch (295:19): [True: 2.35M, False: 0]
|
296 | 2.35M | ch = &(*ch)->next; |
297 | 2.35M | if (*ch) { Branch (297:6): [True: 0, False: 2.35M]
|
298 | 0 | EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch)); |
299 | 0 | evbuffer_free_all_chains(*ch); |
300 | 0 | *ch = NULL; |
301 | 0 | } |
302 | 2.35M | return ch; |
303 | 2.35M | } |
304 | | |
305 | | /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty |
306 | | * chains as necessary. Requires lock. Does not schedule callbacks. |
307 | | */ |
308 | | static void |
309 | | evbuffer_chain_insert(struct evbuffer *buf, |
310 | | struct evbuffer_chain *chain) |
311 | 9.42M | { |
312 | 9.42M | ASSERT_EVBUFFER_LOCKED(buf); |
313 | 9.42M | if (*buf->last_with_datap == NULL) { Branch (313:6): [True: 9.42M, False: 0]
|
314 | | /* There are no chains data on the buffer at all. */ |
315 | 9.42M | EVUTIL_ASSERT(buf->last_with_datap == &buf->first); |
316 | 9.42M | EVUTIL_ASSERT(buf->first == NULL); |
317 | 9.42M | buf->first = buf->last = chain; |
318 | 9.42M | } else { |
319 | 0 | struct evbuffer_chain **chp; |
320 | 0 | chp = evbuffer_free_trailing_empty_chains(buf); |
321 | 0 | *chp = chain; |
322 | 0 | if (chain->off) Branch (322:7): [True: 0, False: 0]
|
323 | 0 | buf->last_with_datap = chp; |
324 | 0 | buf->last = chain; |
325 | 0 | } |
326 | 9.42M | buf->total_len += chain->off; |
327 | 9.42M | } |
328 | | |
329 | | static inline struct evbuffer_chain * |
330 | | evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen) |
331 | 2.35M | { |
332 | 2.35M | struct evbuffer_chain *chain; |
333 | 2.35M | if ((chain = evbuffer_chain_new(datlen)) == NULL) Branch (333:6): [True: 0, False: 2.35M]
|
334 | 0 | return NULL; |
335 | 2.35M | evbuffer_chain_insert(buf, chain); |
336 | 2.35M | return chain; |
337 | 2.35M | } |
338 | | |
339 | | void |
340 | | evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag) |
341 | 0 | { |
342 | 0 | EVUTIL_ASSERT((chain->flags & flag) == 0); |
343 | 0 | chain->flags |= flag; |
344 | 0 | } |
345 | | |
346 | | void |
347 | | evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag) |
348 | 0 | { |
349 | 0 | EVUTIL_ASSERT((chain->flags & flag) != 0); |
350 | 0 | chain->flags &= ~flag; |
351 | 0 | if (chain->flags & EVBUFFER_DANGLING) Branch (351:6): [True: 0, False: 0]
|
352 | 0 | evbuffer_chain_free(chain); |
353 | 0 | } |
354 | | |
355 | | static inline void |
356 | | evbuffer_chain_incref(struct evbuffer_chain *chain) |
357 | 0 | { |
358 | 0 | ++chain->refcnt; |
359 | 0 | } |
360 | | |
361 | | struct evbuffer * |
362 | | evbuffer_new(void) |
363 | 14.1M | { |
364 | 14.1M | struct evbuffer *buffer; |
365 | | |
366 | 14.1M | buffer = mm_calloc(1, sizeof(struct evbuffer)); |
367 | 14.1M | if (buffer == NULL) Branch (367:6): [True: 0, False: 14.1M]
|
368 | 0 | return (NULL); |
369 | | |
370 | 14.1M | LIST_INIT(&buffer->callbacks); |
371 | 14.1M | buffer->refcnt = 1; |
372 | 14.1M | buffer->last_with_datap = &buffer->first; |
373 | | |
374 | 14.1M | return (buffer); |
375 | 14.1M | } |
376 | | |
377 | | int |
378 | | evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags) |
379 | 2.35M | { |
380 | 2.35M | EVBUFFER_LOCK(buf); |
381 | 2.35M | buf->flags |= (ev_uint32_t)flags; |
382 | 2.35M | EVBUFFER_UNLOCK(buf); |
383 | 2.35M | return 0; |
384 | 2.35M | } |
385 | | |
386 | | int |
387 | | evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags) |
388 | 0 | { |
389 | 0 | EVBUFFER_LOCK(buf); |
390 | 0 | buf->flags &= ~(ev_uint32_t)flags; |
391 | 0 | EVBUFFER_UNLOCK(buf); |
392 | 0 | return 0; |
393 | 0 | } |
394 | | |
395 | | void |
396 | | evbuffer_incref_(struct evbuffer *buf) |
397 | 0 | { |
398 | 0 | EVBUFFER_LOCK(buf); |
399 | 0 | ++buf->refcnt; |
400 | 0 | EVBUFFER_UNLOCK(buf); |
401 | 0 | } |
402 | | |
403 | | void |
404 | | evbuffer_incref_and_lock_(struct evbuffer *buf) |
405 | 0 | { |
406 | 0 | EVBUFFER_LOCK(buf); |
407 | 0 | ++buf->refcnt; |
408 | 0 | } |
409 | | |
410 | | int |
411 | | evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base) |
412 | 0 | { |
413 | 0 | EVBUFFER_LOCK(buffer); |
414 | 0 | buffer->cb_queue = base; |
415 | 0 | buffer->deferred_cbs = 1; |
416 | 0 | event_deferred_cb_init_(&buffer->deferred, |
417 | 0 | event_base_get_npriorities(base) / 2, |
418 | 0 | evbuffer_deferred_callback, buffer); |
419 | 0 | EVBUFFER_UNLOCK(buffer); |
420 | 0 | return 0; |
421 | 0 | } |
422 | | |
423 | | int |
424 | | evbuffer_enable_locking(struct evbuffer *buf, void *lock) |
425 | 0 | { |
426 | | #ifdef EVENT__DISABLE_THREAD_SUPPORT |
427 | | return -1; |
428 | | #else |
429 | 0 | if (buf->lock) Branch (429:6): [True: 0, False: 0]
|
430 | 0 | return -1; |
431 | | |
432 | 0 | if (!lock) { Branch (432:6): [True: 0, False: 0]
|
433 | 0 | EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE); |
434 | 0 | if (!lock) Branch (434:7): [True: 0, False: 0]
|
435 | 0 | return -1; |
436 | 0 | buf->lock = lock; |
437 | 0 | buf->own_lock = 1; |
438 | 0 | } else { |
439 | 0 | buf->lock = lock; |
440 | 0 | buf->own_lock = 0; |
441 | 0 | } |
442 | | |
443 | 0 | return 0; |
444 | 0 | #endif |
445 | 0 | } |
446 | | |
447 | | void |
448 | | evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev) |
449 | 4.71M | { |
450 | 4.71M | EVBUFFER_LOCK(buf); |
451 | 4.71M | buf->parent = bev; |
452 | 4.71M | EVBUFFER_UNLOCK(buf); |
453 | 4.71M | } |
454 | | |
455 | | static void |
456 | | evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred) |
457 | 16.5M | { |
458 | 16.5M | struct evbuffer_cb_entry *cbent, *next; |
459 | 16.5M | struct evbuffer_cb_info info; |
460 | 16.5M | size_t new_size; |
461 | 16.5M | ev_uint32_t mask, masked_val; |
462 | 16.5M | int clear = 1; |
463 | | |
464 | 16.5M | if (running_deferred) { Branch (464:6): [True: 0, False: 16.5M]
|
465 | 0 | mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; |
466 | 0 | masked_val = EVBUFFER_CB_ENABLED; |
467 | 16.5M | } else if (buffer->deferred_cbs) { Branch (467:13): [True: 0, False: 16.5M]
|
468 | 0 | mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; |
469 | 0 | masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED; |
470 | | /* Don't zero-out n_add/n_del, since the deferred callbacks |
471 | | will want to see them. */ |
472 | 0 | clear = 0; |
473 | 16.5M | } else { |
474 | 16.5M | mask = EVBUFFER_CB_ENABLED; |
475 | 16.5M | masked_val = EVBUFFER_CB_ENABLED; |
476 | 16.5M | } |
477 | | |
478 | 16.5M | ASSERT_EVBUFFER_LOCKED(buffer); |
479 | | |
480 | 16.5M | if (LIST_EMPTY(&buffer->callbacks)) { |
481 | 0 | buffer->n_add_for_cb = buffer->n_del_for_cb = 0; |
482 | 0 | return; |
483 | 0 | } |
484 | 16.5M | if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0) Branch (484:6): [True: 2.35M, False: 14.1M]
Branch (484:35): [True: 0, False: 2.35M]
|
485 | 0 | return; |
486 | | |
487 | 16.5M | new_size = buffer->total_len; |
488 | 16.5M | info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb; |
489 | 16.5M | info.n_added = buffer->n_add_for_cb; |
490 | 16.5M | info.n_deleted = buffer->n_del_for_cb; |
491 | 16.5M | if (clear) { Branch (491:6): [True: 16.5M, False: 0]
|
492 | 16.5M | buffer->n_add_for_cb = 0; |
493 | 16.5M | buffer->n_del_for_cb = 0; |
494 | 16.5M | } |
495 | 16.5M | for (cbent = LIST_FIRST(&buffer->callbacks); |
496 | 33.0M | cbent != LIST_END(&buffer->callbacks); Branch (496:7): [True: 16.5M, False: 16.5M]
|
497 | 16.5M | cbent = next) { |
498 | | /* Get the 'next' pointer now in case this callback decides |
499 | | * to remove itself or something. */ |
500 | 16.5M | next = LIST_NEXT(cbent, next); |
501 | | |
502 | 16.5M | if ((cbent->flags & mask) != masked_val) Branch (502:7): [True: 0, False: 16.5M]
|
503 | 0 | continue; |
504 | | |
505 | 16.5M | if ((cbent->flags & EVBUFFER_CB_OBSOLETE)) Branch (505:7): [True: 0, False: 16.5M]
|
506 | 0 | cbent->cb.cb_obsolete(buffer, |
507 | 0 | info.orig_size, new_size, cbent->cbarg); |
508 | 16.5M | else |
509 | 16.5M | cbent->cb.cb_func(buffer, &info, cbent->cbarg); |
510 | 16.5M | } |
511 | 16.5M | } |
512 | | |
513 | | void |
514 | | evbuffer_invoke_callbacks_(struct evbuffer *buffer) |
515 | 56.6M | { |
516 | 56.6M | if (LIST_EMPTY(&buffer->callbacks)) { |
517 | 40.1M | buffer->n_add_for_cb = buffer->n_del_for_cb = 0; |
518 | 40.1M | return; |
519 | 40.1M | } |
520 | | |
521 | 16.5M | if (buffer->deferred_cbs) { Branch (521:6): [True: 0, False: 16.5M]
|
522 | 0 | if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) { Branch (522:7): [True: 0, False: 0]
|
523 | 0 | evbuffer_incref_and_lock_(buffer); |
524 | 0 | if (buffer->parent) Branch (524:8): [True: 0, False: 0]
|
525 | 0 | bufferevent_incref_(buffer->parent); |
526 | 0 | EVBUFFER_UNLOCK(buffer); |
527 | 0 | } |
528 | 0 | } |
529 | | |
530 | 16.5M | evbuffer_run_callbacks(buffer, 0); |
531 | 16.5M | } |
532 | | |
533 | | static void |
534 | | evbuffer_deferred_callback(struct event_callback *cb, void *arg) |
535 | 0 | { |
536 | 0 | struct bufferevent *parent = NULL; |
537 | 0 | struct evbuffer *buffer = arg; |
538 | | |
539 | | /* XXXX It would be better to run these callbacks without holding the |
540 | | * lock */ |
541 | 0 | EVBUFFER_LOCK(buffer); |
542 | 0 | parent = buffer->parent; |
543 | 0 | evbuffer_run_callbacks(buffer, 1); |
544 | 0 | evbuffer_decref_and_unlock_(buffer); |
545 | 0 | if (parent) Branch (545:6): [True: 0, False: 0]
|
546 | 0 | bufferevent_decref_(parent); |
547 | 0 | } |
548 | | |
549 | | static void |
550 | | evbuffer_remove_all_callbacks(struct evbuffer *buffer) |
551 | 14.1M | { |
552 | 14.1M | struct evbuffer_cb_entry *cbent; |
553 | | |
554 | 16.4M | while ((cbent = LIST_FIRST(&buffer->callbacks))) { Branch (554:9): [True: 2.35M, False: 14.1M]
|
555 | 2.35M | LIST_REMOVE(cbent, next); |
556 | 2.35M | mm_free(cbent); |
557 | 2.35M | } |
558 | 14.1M | } |
559 | | |
560 | | void |
561 | | evbuffer_decref_and_unlock_(struct evbuffer *buffer) |
562 | 14.1M | { |
563 | 14.1M | struct evbuffer_chain *chain, *next; |
564 | 14.1M | ASSERT_EVBUFFER_LOCKED(buffer); |
565 | | |
566 | 14.1M | EVUTIL_ASSERT(buffer->refcnt > 0); |
567 | | |
568 | 14.1M | if (--buffer->refcnt > 0) { Branch (568:6): [True: 0, False: 14.1M]
|
569 | 0 | EVBUFFER_UNLOCK(buffer); |
570 | 0 | return; |
571 | 0 | } |
572 | | |
573 | 16.4M | for (chain = buffer->first; chain != NULL; chain = next) { Branch (573:30): [True: 2.34M, False: 14.1M]
|
574 | 2.34M | next = chain->next; |
575 | 2.34M | evbuffer_chain_free(chain); |
576 | 2.34M | } |
577 | 14.1M | evbuffer_remove_all_callbacks(buffer); |
578 | 14.1M | if (buffer->deferred_cbs) Branch (578:6): [True: 0, False: 14.1M]
|
579 | 0 | event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred); |
580 | | |
581 | 14.1M | EVBUFFER_UNLOCK(buffer); |
582 | 14.1M | if (buffer->own_lock) Branch (582:6): [True: 0, False: 14.1M]
|
583 | 0 | EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE); |
584 | 14.1M | mm_free(buffer); |
585 | 14.1M | } |
586 | | |
587 | | void |
588 | | evbuffer_free(struct evbuffer *buffer) |
589 | 14.1M | { |
590 | 14.1M | EVBUFFER_LOCK(buffer); |
591 | 14.1M | evbuffer_decref_and_unlock_(buffer); |
592 | 14.1M | } |
593 | | |
594 | | void |
595 | | evbuffer_lock(struct evbuffer *buf) |
596 | 0 | { |
597 | 0 | EVBUFFER_LOCK(buf); |
598 | 0 | } |
599 | | |
600 | | void |
601 | | evbuffer_unlock(struct evbuffer *buf) |
602 | 0 | { |
603 | 0 | EVBUFFER_UNLOCK(buf); |
604 | 0 | } |
605 | | |
606 | | size_t |
607 | | evbuffer_get_length(const struct evbuffer *buffer) |
608 | 28.2M | { |
609 | 28.2M | size_t result; |
610 | | |
611 | 28.2M | EVBUFFER_LOCK(buffer); |
612 | | |
613 | 28.2M | result = (buffer->total_len); |
614 | | |
615 | 28.2M | EVBUFFER_UNLOCK(buffer); |
616 | | |
617 | 28.2M | return result; |
618 | 28.2M | } |
619 | | |
620 | | size_t |
621 | | evbuffer_get_contiguous_space(const struct evbuffer *buf) |
622 | 0 | { |
623 | 0 | struct evbuffer_chain *chain; |
624 | 0 | size_t result; |
625 | |
|
626 | 0 | EVBUFFER_LOCK(buf); |
627 | 0 | chain = buf->first; |
628 | 0 | result = (chain != NULL ? chain->off : 0); Branch (628:12): [True: 0, False: 0]
|
629 | 0 | EVBUFFER_UNLOCK(buf); |
630 | |
|
631 | 0 | return result; |
632 | 0 | } |
633 | | |
634 | | size_t |
635 | 0 | evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) { |
636 | 0 | int n; |
637 | 0 | size_t res; |
638 | 0 | size_t to_alloc; |
639 | |
|
640 | 0 | EVBUFFER_LOCK(buf); |
641 | |
|
642 | 0 | res = to_alloc = 0; |
643 | |
|
644 | 0 | for (n = 0; n < n_vec; n++) { Branch (644:14): [True: 0, False: 0]
|
645 | 0 | to_alloc += vec[n].iov_len; |
646 | 0 | } |
647 | |
|
648 | 0 | if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) { Branch (648:6): [True: 0, False: 0]
|
649 | 0 | goto done; |
650 | 0 | } |
651 | | |
652 | 0 | for (n = 0; n < n_vec; n++) { Branch (652:14): [True: 0, False: 0]
|
653 | | /* XXX each 'add' call here does a bunch of setup that's |
654 | | * obviated by evbuffer_expand_fast_, and some cleanup that we |
655 | | * would like to do only once. Instead we should just extract |
656 | | * the part of the code that's needed. */ |
657 | |
|
658 | 0 | if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) { Branch (658:7): [True: 0, False: 0]
|
659 | 0 | goto done; |
660 | 0 | } |
661 | | |
662 | 0 | res += vec[n].iov_len; |
663 | 0 | } |
664 | | |
665 | 0 | done: |
666 | 0 | EVBUFFER_UNLOCK(buf); |
667 | 0 | return res; |
668 | 0 | } |
669 | | |
670 | | int |
671 | | evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size, |
672 | | struct evbuffer_iovec *vec, int n_vecs) |
673 | 0 | { |
674 | 0 | struct evbuffer_chain *chain, **chainp; |
675 | 0 | int n = -1; |
676 | |
|
677 | 0 | EVBUFFER_LOCK(buf); |
678 | 0 | if (buf->freeze_end) Branch (678:6): [True: 0, False: 0]
|
679 | 0 | goto done; |
680 | 0 | if (n_vecs < 1) Branch (680:6): [True: 0, False: 0]
|
681 | 0 | goto done; |
682 | 0 | if (n_vecs == 1) { Branch (682:6): [True: 0, False: 0]
|
683 | 0 | if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL) Branch (683:7): [True: 0, False: 0]
|
684 | 0 | goto done; |
685 | | |
686 | 0 | vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain); |
687 | 0 | vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain); |
688 | 0 | EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size); |
689 | 0 | n = 1; |
690 | 0 | } else { |
691 | 0 | if (evbuffer_expand_fast_(buf, size, n_vecs)<0) Branch (691:7): [True: 0, False: 0]
|
692 | 0 | goto done; |
693 | 0 | n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs, |
694 | 0 | &chainp, 0); |
695 | 0 | } |
696 | | |
697 | 0 | done: |
698 | 0 | EVBUFFER_UNLOCK(buf); |
699 | 0 | return n; |
700 | |
|
701 | 0 | } |
702 | | |
703 | | static int |
704 | | advance_last_with_data(struct evbuffer *buf) |
705 | 9.44M | { |
706 | 9.44M | int n = 0; |
707 | 9.44M | struct evbuffer_chain **chainp = buf->last_with_datap; |
708 | | |
709 | 9.44M | ASSERT_EVBUFFER_LOCKED(buf); |
710 | | |
711 | 9.44M | if (!*chainp) Branch (711:6): [True: 0, False: 9.44M]
|
712 | 0 | return 0; |
713 | | |
714 | 9.44M | while ((*chainp)->next) { Branch (714:9): [True: 0, False: 9.44M]
|
715 | 0 | chainp = &(*chainp)->next; |
716 | 0 | if ((*chainp)->off) Branch (716:7): [True: 0, False: 0]
|
717 | 0 | buf->last_with_datap = chainp; |
718 | 0 | ++n; |
719 | 0 | } |
720 | 9.44M | return n; |
721 | 9.44M | } |
722 | | |
723 | | int |
724 | | evbuffer_commit_space(struct evbuffer *buf, |
725 | | struct evbuffer_iovec *vec, int n_vecs) |
726 | 0 | { |
727 | 0 | struct evbuffer_chain *chain, **firstchainp, **chainp; |
728 | 0 | int result = -1; |
729 | 0 | size_t added = 0; |
730 | 0 | int i; |
731 | |
|
732 | 0 | EVBUFFER_LOCK(buf); |
733 | |
|
734 | 0 | if (buf->freeze_end) Branch (734:6): [True: 0, False: 0]
|
735 | 0 | goto done; |
736 | 0 | if (n_vecs == 0) { Branch (736:6): [True: 0, False: 0]
|
737 | 0 | result = 0; |
738 | 0 | goto done; |
739 | 0 | } else if (n_vecs == 1 && Branch (739:13): [True: 0, False: 0]
|
740 | 0 | (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) { Branch (740:7): [True: 0, False: 0]
Branch (740:20): [True: 0, False: 0]
|
741 | | /* The user only got or used one chain; it might not |
742 | | * be the first one with space in it. */ |
743 | 0 | if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last)) Branch (743:7): [True: 0, False: 0]
|
744 | 0 | goto done; |
745 | 0 | buf->last->off += vec[0].iov_len; |
746 | 0 | added = vec[0].iov_len; |
747 | 0 | if (added) Branch (747:7): [True: 0, False: 0]
|
748 | 0 | advance_last_with_data(buf); |
749 | 0 | goto okay; |
750 | 0 | } |
751 | | |
752 | | /* Advance 'firstchain' to the first chain with space in it. */ |
753 | 0 | firstchainp = buf->last_with_datap; |
754 | 0 | if (!*firstchainp) Branch (754:6): [True: 0, False: 0]
|
755 | 0 | goto done; |
756 | 0 | if (CHAIN_SPACE_LEN(*firstchainp) == 0) { Branch (756:6): [True: 0, False: 0]
|
757 | 0 | firstchainp = &(*firstchainp)->next; |
758 | 0 | } |
759 | |
|
760 | 0 | chain = *firstchainp; |
761 | | /* pass 1: make sure that the pointers and lengths of vecs[] are in |
762 | | * bounds before we try to commit anything. */ |
763 | 0 | for (i=0; i<n_vecs; ++i) { Branch (763:12): [True: 0, False: 0]
|
764 | 0 | if (!chain) Branch (764:7): [True: 0, False: 0]
|
765 | 0 | goto done; |
766 | 0 | if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) || Branch (766:7): [True: 0, False: 0]
|
767 | 0 | (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain)) Branch (767:7): [True: 0, False: 0]
|
768 | 0 | goto done; |
769 | 0 | chain = chain->next; |
770 | 0 | } |
771 | | /* pass 2: actually adjust all the chains. */ |
772 | 0 | chainp = firstchainp; |
773 | 0 | for (i=0; i<n_vecs; ++i) { Branch (773:12): [True: 0, False: 0]
|
774 | 0 | (*chainp)->off += vec[i].iov_len; |
775 | 0 | added += vec[i].iov_len; |
776 | 0 | if (vec[i].iov_len) { Branch (776:7): [True: 0, False: 0]
|
777 | 0 | buf->last_with_datap = chainp; |
778 | 0 | } |
779 | 0 | chainp = &(*chainp)->next; |
780 | 0 | } |
781 | |
|
782 | 0 | okay: |
783 | 0 | buf->total_len += added; |
784 | 0 | buf->n_add_for_cb += added; |
785 | 0 | result = 0; |
786 | 0 | evbuffer_invoke_callbacks_(buf); |
787 | |
|
788 | 0 | done: |
789 | 0 | EVBUFFER_UNLOCK(buf); |
790 | 0 | return result; |
791 | 0 | } |
792 | | |
793 | | static inline int |
794 | | HAS_PINNED_R(struct evbuffer *buf) |
795 | 9.43M | { |
796 | 9.43M | return (buf->last && CHAIN_PINNED_R(buf->last)); Branch (796:10): [True: 9.43M, False: 0]
|
797 | 9.43M | } |
798 | | |
799 | | static inline void |
800 | | ZERO_CHAIN(struct evbuffer *dst) |
801 | 9.43M | { |
802 | 9.43M | ASSERT_EVBUFFER_LOCKED(dst); |
803 | 9.43M | dst->first = NULL; |
804 | 9.43M | dst->last = NULL; |
805 | 9.43M | dst->last_with_datap = &(dst)->first; |
806 | 9.43M | dst->total_len = 0; |
807 | 9.43M | } |
808 | | |
809 | | /* Prepares the contents of src to be moved to another buffer by removing |
810 | | * read-pinned chains. The first pinned chain is saved in first, and the |
811 | | * last in last. If src has no read-pinned chains, first and last are set |
812 | | * to NULL. */ |
813 | | static int |
814 | | PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first, |
815 | | struct evbuffer_chain **last) |
816 | 4.71M | { |
817 | 4.71M | struct evbuffer_chain *chain, **pinned; |
818 | | |
819 | 4.71M | ASSERT_EVBUFFER_LOCKED(src); |
820 | | |
821 | 4.71M | if (!HAS_PINNED_R(src)) { Branch (821:6): [True: 4.71M, False: 0]
|
822 | 4.71M | *first = *last = NULL; |
823 | 4.71M | return 0; |
824 | 4.71M | } |
825 | | |
826 | 0 | pinned = src->last_with_datap; |
827 | 0 | if (!CHAIN_PINNED_R(*pinned)) Branch (827:6): [True: 0, False: 0]
|
828 | 0 | pinned = &(*pinned)->next; |
829 | 0 | EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned)); |
830 | 0 | chain = *first = *pinned; |
831 | 0 | *last = src->last; |
832 | | |
833 | | /* If there's data in the first pinned chain, we need to allocate |
834 | | * a new chain and copy the data over. */ |
835 | 0 | if (chain->off) { Branch (835:6): [True: 0, False: 0]
|
836 | 0 | struct evbuffer_chain *tmp; |
837 | |
|
838 | 0 | EVUTIL_ASSERT(pinned == src->last_with_datap); |
839 | 0 | tmp = evbuffer_chain_new(chain->off); |
840 | 0 | if (!tmp) Branch (840:7): [True: 0, False: 0]
|
841 | 0 | return -1; |
842 | 0 | memcpy(tmp->buffer, chain->buffer + chain->misalign, |
843 | 0 | chain->off); |
844 | 0 | tmp->off = chain->off; |
845 | 0 | *src->last_with_datap = tmp; |
846 | 0 | src->last = tmp; |
847 | 0 | chain->misalign += chain->off; |
848 | 0 | chain->off = 0; |
849 | 0 | } else { |
850 | 0 | src->last = *src->last_with_datap; |
851 | 0 | *pinned = NULL; |
852 | 0 | } |
853 | | |
854 | 0 | return 0; |
855 | 0 | } |
856 | | |
857 | | static inline void |
858 | | RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned, |
859 | | struct evbuffer_chain *last) |
860 | 4.71M | { |
861 | 4.71M | ASSERT_EVBUFFER_LOCKED(src); |
862 | | |
863 | 4.71M | if (!pinned) { Branch (863:6): [True: 4.71M, False: 0]
|
864 | 4.71M | ZERO_CHAIN(src); |
865 | 4.71M | return; |
866 | 4.71M | } |
867 | | |
868 | 0 | src->first = pinned; |
869 | 0 | src->last = last; |
870 | 0 | src->last_with_datap = &src->first; |
871 | 0 | src->total_len = 0; |
872 | 0 | } |
873 | | |
874 | | static inline void |
875 | | COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src) |
876 | 2.35M | { |
877 | 2.35M | ASSERT_EVBUFFER_LOCKED(dst); |
878 | 2.35M | ASSERT_EVBUFFER_LOCKED(src); |
879 | 2.35M | dst->first = src->first; |
880 | 2.35M | if (src->last_with_datap == &src->first) Branch (880:6): [True: 2.35M, False: 0]
|
881 | 2.35M | dst->last_with_datap = &dst->first; |
882 | 0 | else |
883 | 0 | dst->last_with_datap = src->last_with_datap; |
884 | 2.35M | dst->last = src->last; |
885 | 2.35M | dst->total_len = src->total_len; |
886 | 2.35M | } |
887 | | |
888 | | static void |
889 | | APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) |
890 | 2.35M | { |
891 | 2.35M | struct evbuffer_chain **chp; |
892 | | |
893 | 2.35M | ASSERT_EVBUFFER_LOCKED(dst); |
894 | 2.35M | ASSERT_EVBUFFER_LOCKED(src); |
895 | | |
896 | 2.35M | chp = evbuffer_free_trailing_empty_chains(dst); |
897 | 2.35M | *chp = src->first; |
898 | | |
899 | 2.35M | if (src->last_with_datap == &src->first) Branch (899:6): [True: 2.35M, False: 0]
|
900 | 2.35M | dst->last_with_datap = chp; |
901 | 0 | else |
902 | 0 | dst->last_with_datap = src->last_with_datap; |
903 | 2.35M | dst->last = src->last; |
904 | 2.35M | dst->total_len += src->total_len; |
905 | 2.35M | } |
906 | | |
907 | | static inline void |
908 | | APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src) |
909 | 0 | { |
910 | 0 | struct evbuffer_chain *tmp; |
911 | 0 | struct evbuffer_chain *chain = src->first; |
912 | 0 | struct evbuffer_multicast_parent *extra; |
913 | |
|
914 | 0 | ASSERT_EVBUFFER_LOCKED(dst); |
915 | 0 | ASSERT_EVBUFFER_LOCKED(src); |
916 | | |
917 | 0 | for (; chain; chain = chain->next) { Branch (917:9): [True: 0, False: 0]
|
918 | 0 | if (!chain->off || chain->flags & EVBUFFER_DANGLING) { Branch (918:7): [True: 0, False: 0]
Branch (918:22): [True: 0, False: 0]
|
919 | | /* skip empty chains */ |
920 | 0 | continue; |
921 | 0 | } |
922 | | |
923 | 0 | tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent)); |
924 | 0 | if (!tmp) { Branch (924:7): [True: 0, False: 0]
|
925 | 0 | event_warn("%s: out of memory", __func__); |
926 | 0 | return; |
927 | 0 | } |
928 | 0 | extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp); |
929 | | /* reference evbuffer containing source chain so it |
930 | | * doesn't get released while the chain is still |
931 | | * being referenced to */ |
932 | 0 | evbuffer_incref_(src); |
933 | 0 | extra->source = src; |
934 | | /* reference source chain which now becomes immutable */ |
935 | 0 | evbuffer_chain_incref(chain); |
936 | 0 | extra->parent = chain; |
937 | 0 | chain->flags |= EVBUFFER_IMMUTABLE; |
938 | 0 | tmp->buffer_len = chain->buffer_len; |
939 | 0 | tmp->misalign = chain->misalign; |
940 | 0 | tmp->off = chain->off; |
941 | 0 | tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE; |
942 | 0 | tmp->buffer = chain->buffer; |
943 | 0 | evbuffer_chain_insert(dst, tmp); |
944 | 0 | } |
945 | 0 | } |
946 | | |
947 | | static void |
948 | | PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src) |
949 | 0 | { |
950 | 0 | ASSERT_EVBUFFER_LOCKED(dst); |
951 | 0 | ASSERT_EVBUFFER_LOCKED(src); |
952 | 0 | src->last->next = dst->first; |
953 | 0 | dst->first = src->first; |
954 | 0 | dst->total_len += src->total_len; |
955 | 0 | if (*dst->last_with_datap == NULL) { Branch (955:6): [True: 0, False: 0]
|
956 | 0 | if (src->last_with_datap == &(src)->first) Branch (956:7): [True: 0, False: 0]
|
957 | 0 | dst->last_with_datap = &dst->first; |
958 | 0 | else |
959 | 0 | dst->last_with_datap = src->last_with_datap; |
960 | 0 | } else if (dst->last_with_datap == &dst->first) { Branch (960:13): [True: 0, False: 0]
|
961 | 0 | dst->last_with_datap = &src->last->next; |
962 | 0 | } |
963 | 0 | } |
964 | | |
965 | | int |
966 | | evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) |
967 | 4.71M | { |
968 | 4.71M | struct evbuffer_chain *pinned, *last; |
969 | 4.71M | size_t in_total_len, out_total_len; |
970 | 4.71M | int result = 0; |
971 | | |
972 | 4.71M | EVBUFFER_LOCK2(inbuf, outbuf); |
973 | 4.71M | in_total_len = inbuf->total_len; |
974 | 4.71M | out_total_len = outbuf->total_len; |
975 | | |
976 | 4.71M | if (in_total_len == 0 || outbuf == inbuf) Branch (976:6): [True: 0, False: 4.71M]
Branch (976:27): [True: 0, False: 4.71M]
|
977 | 0 | goto done; |
978 | | |
979 | 4.71M | if (outbuf->freeze_end || inbuf->freeze_start) { Branch (979:6): [True: 0, False: 4.71M]
Branch (979:28): [True: 0, False: 4.71M]
|
980 | 0 | result = -1; |
981 | 0 | goto done; |
982 | 0 | } |
983 | | |
984 | 4.71M | if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { Branch (984:6): [True: 0, False: 4.71M]
|
985 | 0 | result = -1; |
986 | 0 | goto done; |
987 | 0 | } |
988 | | |
989 | 4.71M | if (out_total_len == 0) { Branch (989:6): [True: 2.35M, False: 2.35M]
|
990 | | /* There might be an empty chain at the start of outbuf; free |
991 | | * it. */ |
992 | 2.35M | evbuffer_free_all_chains(outbuf->first); |
993 | 2.35M | COPY_CHAIN(outbuf, inbuf); |
994 | 2.35M | } else { |
995 | 2.35M | APPEND_CHAIN(outbuf, inbuf); |
996 | 2.35M | } |
997 | | |
998 | 4.71M | RESTORE_PINNED(inbuf, pinned, last); |
999 | | |
1000 | 4.71M | inbuf->n_del_for_cb += in_total_len; |
1001 | 4.71M | outbuf->n_add_for_cb += in_total_len; |
1002 | | |
1003 | 4.71M | evbuffer_invoke_callbacks_(inbuf); |
1004 | 4.71M | evbuffer_invoke_callbacks_(outbuf); |
1005 | | |
1006 | 4.71M | done: |
1007 | 4.71M | EVBUFFER_UNLOCK2(inbuf, outbuf); |
1008 | 4.71M | return result; |
1009 | 4.71M | } |
1010 | | |
1011 | | int |
1012 | | evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf) |
1013 | 0 | { |
1014 | 0 | size_t in_total_len, out_total_len; |
1015 | 0 | struct evbuffer_chain *chain; |
1016 | 0 | int result = 0; |
1017 | |
|
1018 | 0 | EVBUFFER_LOCK2(inbuf, outbuf); |
1019 | 0 | in_total_len = inbuf->total_len; |
1020 | 0 | out_total_len = outbuf->total_len; |
1021 | 0 | chain = inbuf->first; |
1022 | |
|
1023 | 0 | if (in_total_len == 0) Branch (1023:6): [True: 0, False: 0]
|
1024 | 0 | goto done; |
1025 | | |
1026 | 0 | if (outbuf->freeze_end || outbuf == inbuf) { Branch (1026:6): [True: 0, False: 0]
Branch (1026:28): [True: 0, False: 0]
|
1027 | 0 | result = -1; |
1028 | 0 | goto done; |
1029 | 0 | } |
1030 | | |
1031 | 0 | for (; chain; chain = chain->next) { Branch (1031:9): [True: 0, False: 0]
|
1032 | 0 | if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) { Branch (1032:7): [True: 0, False: 0]
|
1033 | | /* chain type can not be referenced */ |
1034 | 0 | result = -1; |
1035 | 0 | goto done; |
1036 | 0 | } |
1037 | 0 | } |
1038 | | |
1039 | 0 | if (out_total_len == 0) { Branch (1039:6): [True: 0, False: 0]
|
1040 | | /* There might be an empty chain at the start of outbuf; free |
1041 | | * it. */ |
1042 | 0 | evbuffer_free_all_chains(outbuf->first); |
1043 | 0 | } |
1044 | 0 | APPEND_CHAIN_MULTICAST(outbuf, inbuf); |
1045 | |
|
1046 | 0 | outbuf->n_add_for_cb += in_total_len; |
1047 | 0 | evbuffer_invoke_callbacks_(outbuf); |
1048 | |
|
1049 | 0 | done: |
1050 | 0 | EVBUFFER_UNLOCK2(inbuf, outbuf); |
1051 | 0 | return result; |
1052 | 0 | } |
1053 | | |
1054 | | int |
1055 | | evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf) |
1056 | 0 | { |
1057 | 0 | struct evbuffer_chain *pinned, *last; |
1058 | 0 | size_t in_total_len, out_total_len; |
1059 | 0 | int result = 0; |
1060 | |
|
1061 | 0 | EVBUFFER_LOCK2(inbuf, outbuf); |
1062 | |
|
1063 | 0 | in_total_len = inbuf->total_len; |
1064 | 0 | out_total_len = outbuf->total_len; |
1065 | |
|
1066 | 0 | if (!in_total_len || inbuf == outbuf) Branch (1066:6): [True: 0, False: 0]
Branch (1066:23): [True: 0, False: 0]
|
1067 | 0 | goto done; |
1068 | | |
1069 | 0 | if (outbuf->freeze_start || inbuf->freeze_start) { Branch (1069:6): [True: 0, False: 0]
Branch (1069:30): [True: 0, False: 0]
|
1070 | 0 | result = -1; |
1071 | 0 | goto done; |
1072 | 0 | } |
1073 | | |
1074 | 0 | if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) { Branch (1074:6): [True: 0, False: 0]
|
1075 | 0 | result = -1; |
1076 | 0 | goto done; |
1077 | 0 | } |
1078 | | |
1079 | 0 | if (out_total_len == 0) { Branch (1079:6): [True: 0, False: 0]
|
1080 | | /* There might be an empty chain at the start of outbuf; free |
1081 | | * it. */ |
1082 | 0 | evbuffer_free_all_chains(outbuf->first); |
1083 | 0 | COPY_CHAIN(outbuf, inbuf); |
1084 | 0 | } else { |
1085 | 0 | PREPEND_CHAIN(outbuf, inbuf); |
1086 | 0 | } |
1087 | |
|
1088 | 0 | RESTORE_PINNED(inbuf, pinned, last); |
1089 | |
|
1090 | 0 | inbuf->n_del_for_cb += in_total_len; |
1091 | 0 | outbuf->n_add_for_cb += in_total_len; |
1092 | |
|
1093 | 0 | evbuffer_invoke_callbacks_(inbuf); |
1094 | 0 | evbuffer_invoke_callbacks_(outbuf); |
1095 | 0 | done: |
1096 | 0 | EVBUFFER_UNLOCK2(inbuf, outbuf); |
1097 | 0 | return result; |
1098 | 0 | } |
1099 | | |
1100 | | int |
1101 | | evbuffer_drain(struct evbuffer *buf, size_t len) |
1102 | 30.6M | { |
1103 | 30.6M | struct evbuffer_chain *chain, *next; |
1104 | 30.6M | size_t remaining, old_len; |
1105 | 30.6M | int result = 0; |
1106 | | |
1107 | 30.6M | EVBUFFER_LOCK(buf); |
1108 | 30.6M | old_len = buf->total_len; |
1109 | | |
1110 | 30.6M | if (old_len == 0) Branch (1110:6): [True: 0, False: 30.6M]
|
1111 | 0 | goto done; |
1112 | | |
1113 | 30.6M | if (buf->freeze_start) { Branch (1113:6): [True: 0, False: 30.6M]
|
1114 | 0 | result = -1; |
1115 | 0 | goto done; |
1116 | 0 | } |
1117 | | |
1118 | 30.6M | if (len >= old_len && !HAS_PINNED_R(buf)) { Branch (1118:6): [True: 4.71M, False: 25.9M]
Branch (1118:24): [True: 4.71M, False: 0]
|
1119 | 4.71M | len = old_len; |
1120 | 11.7M | for (chain = buf->first; chain != NULL; chain = next) { Branch (1120:28): [True: 7.07M, False: 4.71M]
|
1121 | 7.07M | next = chain->next; |
1122 | 7.07M | evbuffer_chain_free(chain); |
1123 | 7.07M | } |
1124 | | |
1125 | 4.71M | ZERO_CHAIN(buf); |
1126 | 25.9M | } else { |
1127 | 25.9M | if (len >= old_len) Branch (1127:7): [True: 0, False: 25.9M]
|
1128 | 0 | len = old_len; |
1129 | | |
1130 | 25.9M | buf->total_len -= len; |
1131 | 25.9M | remaining = len; |
1132 | 25.9M | for (chain = buf->first; |
1133 | 25.9M | remaining >= chain->off; Branch (1133:8): [True: 0, False: 25.9M]
|
1134 | 25.9M | chain = next) { |
1135 | 0 | next = chain->next; |
1136 | 0 | remaining -= chain->off; |
1137 | |
|
1138 | 0 | if (chain == *buf->last_with_datap) { Branch (1138:8): [True: 0, False: 0]
|
1139 | 0 | buf->last_with_datap = &buf->first; |
1140 | 0 | } |
1141 | 0 | if (&chain->next == buf->last_with_datap) Branch (1141:8): [True: 0, False: 0]
|
1142 | 0 | buf->last_with_datap = &buf->first; |
1143 | |
|
1144 | 0 | if (CHAIN_PINNED_R(chain)) { |
1145 | 0 | EVUTIL_ASSERT(remaining == 0); |
1146 | 0 | chain->misalign += chain->off; |
1147 | 0 | chain->off = 0; |
1148 | 0 | break; |
1149 | 0 | } else |
1150 | 0 | evbuffer_chain_free(chain); |
1151 | 0 | } |
1152 | | |
1153 | 25.9M | buf->first = chain; |
1154 | 25.9M | EVUTIL_ASSERT(remaining <= chain->off); |
1155 | 25.9M | chain->misalign += remaining; |
1156 | 25.9M | chain->off -= remaining; |
1157 | 25.9M | } |
1158 | | |
1159 | 30.6M | buf->n_del_for_cb += len; |
1160 | | /* Tell someone about changes in this buffer */ |
1161 | 30.6M | evbuffer_invoke_callbacks_(buf); |
1162 | | |
1163 | 30.6M | done: |
1164 | 30.6M | EVBUFFER_UNLOCK(buf); |
1165 | 30.6M | return result; |
1166 | 30.6M | } |
1167 | | |
1168 | | /* Reads data from an event buffer and drains the bytes read */ |
1169 | | int |
1170 | | evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen) |
1171 | 14.1M | { |
1172 | 14.1M | ev_ssize_t n; |
1173 | 14.1M | EVBUFFER_LOCK(buf); |
1174 | 14.1M | n = evbuffer_copyout_from(buf, NULL, data_out, datlen); |
1175 | 14.1M | if (n > 0) { Branch (1175:6): [True: 11.7M, False: 2.35M]
|
1176 | 11.7M | if (evbuffer_drain(buf, n)<0) Branch (1176:7): [True: 0, False: 11.7M]
|
1177 | 0 | n = -1; |
1178 | 11.7M | } |
1179 | 14.1M | EVBUFFER_UNLOCK(buf); |
1180 | 14.1M | return (int)n; |
1181 | 14.1M | } |
1182 | | |
1183 | | ev_ssize_t |
1184 | | evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen) |
1185 | 0 | { |
1186 | 0 | return evbuffer_copyout_from(buf, NULL, data_out, datlen); |
1187 | 0 | } |
1188 | | |
1189 | | ev_ssize_t |
1190 | | evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos, |
1191 | | void *data_out, size_t datlen) |
1192 | 14.1M | { |
1193 | | /*XXX fails badly on sendfile case. */ |
1194 | 14.1M | struct evbuffer_chain *chain; |
1195 | 14.1M | char *data = data_out; |
1196 | 14.1M | size_t nread; |
1197 | 14.1M | ev_ssize_t result = 0; |
1198 | 14.1M | size_t pos_in_chain; |
1199 | | |
1200 | 14.1M | EVBUFFER_LOCK(buf); |
1201 | | |
1202 | 14.1M | if (pos) { Branch (1202:6): [True: 0, False: 14.1M]
|
1203 | 0 | if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) { Branch (1203:7): [True: 0, False: 0]
|
1204 | 0 | result = -1; |
1205 | 0 | goto done; |
1206 | 0 | } |
1207 | 0 | chain = pos->internal_.chain; |
1208 | 0 | pos_in_chain = pos->internal_.pos_in_chain; |
1209 | 0 | if (datlen + pos->pos > buf->total_len) Branch (1209:7): [True: 0, False: 0]
|
1210 | 0 | datlen = buf->total_len - pos->pos; |
1211 | 14.1M | } else { |
1212 | 14.1M | chain = buf->first; |
1213 | 14.1M | pos_in_chain = 0; |
1214 | 14.1M | if (datlen > buf->total_len) Branch (1214:7): [True: 0, False: 14.1M]
|
1215 | 0 | datlen = buf->total_len; |
1216 | 14.1M | } |
1217 | | |
1218 | | |
1219 | 14.1M | if (datlen == 0) Branch (1219:6): [True: 2.35M, False: 11.7M]
|
1220 | 2.35M | goto done; |
1221 | | |
1222 | 11.7M | if (buf->freeze_start) { Branch (1222:6): [True: 0, False: 11.7M]
|
1223 | 0 | result = -1; |
1224 | 0 | goto done; |
1225 | 0 | } |
1226 | | |
1227 | 11.7M | nread = datlen; |
1228 | | |
1229 | 11.7M | while (datlen && datlen >= chain->off - pos_in_chain) { Branch (1229:9): [True: 11.7M, False: 0]
Branch (1229:19): [True: 0, False: 11.7M]
|
1230 | 0 | size_t copylen = chain->off - pos_in_chain; |
1231 | 0 | memcpy(data, |
1232 | 0 | chain->buffer + chain->misalign + pos_in_chain, |
1233 | 0 | copylen); |
1234 | 0 | data += copylen; |
1235 | 0 | datlen -= copylen; |
1236 | |
|
1237 | 0 | chain = chain->next; |
1238 | 0 | pos_in_chain = 0; |
1239 | 0 | EVUTIL_ASSERT(chain || datlen==0); |
1240 | 0 | } |
1241 | | |
1242 | 11.7M | if (datlen) { Branch (1242:6): [True: 11.7M, False: 0]
|
1243 | 11.7M | EVUTIL_ASSERT(chain); |
1244 | 11.7M | EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off); |
1245 | | |
1246 | 11.7M | memcpy(data, chain->buffer + chain->misalign + pos_in_chain, |
1247 | 11.7M | datlen); |
1248 | 11.7M | } |
1249 | | |
1250 | 11.7M | result = nread; |
1251 | 14.1M | done: |
1252 | 14.1M | EVBUFFER_UNLOCK(buf); |
1253 | 14.1M | return result; |
1254 | 11.7M | } |
1255 | | |
1256 | | /* reads data from the src buffer to the dst buffer, avoids memcpy as |
1257 | | * possible. */ |
1258 | | /* XXXX should return ev_ssize_t */ |
1259 | | int |
1260 | | evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst, |
1261 | | size_t datlen) |
1262 | 2.35M | { |
1263 | | /*XXX We should have an option to force this to be zero-copy.*/ |
1264 | | |
1265 | | /*XXX can fail badly on sendfile case. */ |
1266 | 2.35M | struct evbuffer_chain *chain, *previous; |
1267 | 2.35M | size_t nread = 0; |
1268 | 2.35M | int result; |
1269 | | |
1270 | 2.35M | EVBUFFER_LOCK2(src, dst); |
1271 | | |
1272 | 2.35M | chain = previous = src->first; |
1273 | | |
1274 | 2.35M | if (datlen == 0 || dst == src) { Branch (1274:6): [True: 0, False: 2.35M]
Branch (1274:21): [True: 0, False: 2.35M]
|
1275 | 0 | result = 0; |
1276 | 0 | goto done; |
1277 | 0 | } |
1278 | | |
1279 | 2.35M | if (dst->freeze_end || src->freeze_start) { Branch (1279:6): [True: 0, False: 2.35M]
Branch (1279:25): [True: 0, False: 2.35M]
|
1280 | 0 | result = -1; |
1281 | 0 | goto done; |
1282 | 0 | } |
1283 | | |
1284 | | /* short-cut if there is no more data buffered */ |
1285 | 2.35M | if (datlen >= src->total_len) { Branch (1285:6): [True: 2.35M, False: 0]
|
1286 | 2.35M | datlen = src->total_len; |
1287 | 2.35M | evbuffer_add_buffer(dst, src); |
1288 | 2.35M | result = (int)datlen; /*XXXX should return ev_ssize_t*/ |
1289 | 2.35M | goto done; |
1290 | 2.35M | } |
1291 | | |
1292 | | /* removes chains if possible */ |
1293 | 0 | while (chain->off <= datlen) { Branch (1293:9): [True: 0, False: 0]
|
1294 | | /* We can't remove the last with data from src unless we |
1295 | | * remove all chains, in which case we would have done the if |
1296 | | * block above */ |
1297 | 0 | EVUTIL_ASSERT(chain != *src->last_with_datap); |
1298 | 0 | nread += chain->off; |
1299 | 0 | datlen -= chain->off; |
1300 | 0 | previous = chain; |
1301 | 0 | if (src->last_with_datap == &chain->next) Branch (1301:7): [True: 0, False: 0]
|
1302 | 0 | src->last_with_datap = &src->first; |
1303 | 0 | chain = chain->next; |
1304 | 0 | } |
1305 | | |
1306 | 0 | if (chain != src->first) { Branch (1306:6): [True: 0, False: 0]
|
1307 | | /* we can remove the chain */ |
1308 | 0 | struct evbuffer_chain **chp; |
1309 | 0 | chp = evbuffer_free_trailing_empty_chains(dst); |
1310 | |
|
1311 | 0 | if (dst->first == NULL) { Branch (1311:7): [True: 0, False: 0]
|
1312 | 0 | dst->first = src->first; |
1313 | 0 | } else { |
1314 | 0 | *chp = src->first; |
1315 | 0 | } |
1316 | 0 | dst->last = previous; |
1317 | 0 | previous->next = NULL; |
1318 | 0 | src->first = chain; |
1319 | 0 | advance_last_with_data(dst); |
1320 | |
|
1321 | 0 | dst->total_len += nread; |
1322 | 0 | dst->n_add_for_cb += nread; |
1323 | 0 | } |
1324 | | |
1325 | | /* we know that there is more data in the src buffer than |
1326 | | * we want to read, so we manually drain the chain */ |
1327 | 0 | evbuffer_add(dst, chain->buffer + chain->misalign, datlen); |
1328 | 0 | chain->misalign += datlen; |
1329 | 0 | chain->off -= datlen; |
1330 | 0 | nread += datlen; |
1331 | | |
1332 | | /* You might think we would want to increment dst->n_add_for_cb |
1333 | | * here too. But evbuffer_add above already took care of that. |
1334 | | */ |
1335 | 0 | src->total_len -= nread; |
1336 | 0 | src->n_del_for_cb += nread; |
1337 | |
|
1338 | 0 | if (nread) { Branch (1338:6): [True: 0, False: 0]
|
1339 | 0 | evbuffer_invoke_callbacks_(dst); |
1340 | 0 | evbuffer_invoke_callbacks_(src); |
1341 | 0 | } |
1342 | 0 | result = (int)nread;/*XXXX should change return type */ |
1343 | |
|
1344 | 2.35M | done: |
1345 | 2.35M | EVBUFFER_UNLOCK2(src, dst); |
1346 | 2.35M | return result; |
1347 | 0 | } |
1348 | | |
1349 | | unsigned char * |
1350 | | evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size) |
1351 | 2.35M | { |
1352 | 2.35M | struct evbuffer_chain *chain, *next, *tmp, *last_with_data; |
1353 | 2.35M | unsigned char *buffer, *result = NULL; |
1354 | 2.35M | ev_ssize_t remaining; |
1355 | 2.35M | int removed_last_with_data = 0; |
1356 | 2.35M | int removed_last_with_datap = 0; |
1357 | | |
1358 | 2.35M | EVBUFFER_LOCK(buf); |
1359 | | |
1360 | 2.35M | chain = buf->first; |
1361 | | |
1362 | 2.35M | if (size < 0) Branch (1362:6): [True: 0, False: 2.35M]
|
1363 | 0 | size = buf->total_len; |
1364 | | /* if size > buf->total_len, we cannot guarantee to the user that she |
1365 | | * is going to have a long enough buffer afterwards; so we return |
1366 | | * NULL */ |
1367 | 2.35M | if (size == 0 || (size_t)size > buf->total_len) Branch (1367:6): [True: 0, False: 2.35M]
Branch (1367:19): [True: 0, False: 2.35M]
|
1368 | 0 | goto done; |
1369 | | |
1370 | | /* No need to pull up anything; the first size bytes are |
1371 | | * already here. */ |
1372 | 2.35M | if (chain->off >= (size_t)size) { Branch (1372:6): [True: 2.35M, False: 0]
|
1373 | 2.35M | result = chain->buffer + chain->misalign; |
1374 | 2.35M | goto done; |
1375 | 2.35M | } |
1376 | | |
1377 | | /* Make sure that none of the chains we need to copy from is pinned. */ |
1378 | 0 | remaining = size - chain->off; |
1379 | 0 | EVUTIL_ASSERT(remaining >= 0); |
1380 | 0 | for (tmp=chain->next; tmp; tmp=tmp->next) { Branch (1380:24): [True: 0, False: 0]
|
1381 | 0 | if (CHAIN_PINNED(tmp)) |
1382 | 0 | goto done; |
1383 | 0 | if (tmp->off >= (size_t)remaining) Branch (1383:7): [True: 0, False: 0]
|
1384 | 0 | break; |
1385 | 0 | remaining -= tmp->off; |
1386 | 0 | } |
1387 | | |
1388 | 0 | if (CHAIN_PINNED(chain)) { |
1389 | 0 | size_t old_off = chain->off; |
1390 | 0 | if (CHAIN_SPACE_LEN(chain) < size - chain->off) { Branch (1390:7): [True: 0, False: 0]
|
1391 | | /* not enough room at end of chunk. */ |
1392 | 0 | goto done; |
1393 | 0 | } |
1394 | 0 | buffer = CHAIN_SPACE_PTR(chain); |
1395 | 0 | tmp = chain; |
1396 | 0 | tmp->off = size; |
1397 | 0 | size -= old_off; |
1398 | 0 | chain = chain->next; |
1399 | 0 | } else if (chain->buffer_len - chain->misalign >= (size_t)size) { Branch (1399:13): [True: 0, False: 0]
|
1400 | | /* already have enough space in the first chain */ |
1401 | 0 | size_t old_off = chain->off; |
1402 | 0 | buffer = chain->buffer + chain->misalign + chain->off; |
1403 | 0 | tmp = chain; |
1404 | 0 | tmp->off = size; |
1405 | 0 | size -= old_off; |
1406 | 0 | chain = chain->next; |
1407 | 0 | } else { |
1408 | 0 | if ((tmp = evbuffer_chain_new(size)) == NULL) { Branch (1408:7): [True: 0, False: 0]
|
1409 | 0 | event_warn("%s: out of memory", __func__); |
1410 | 0 | goto done; |
1411 | 0 | } |
1412 | 0 | buffer = tmp->buffer; |
1413 | 0 | tmp->off = size; |
1414 | 0 | buf->first = tmp; |
1415 | 0 | } |
1416 | | |
1417 | | /* TODO(niels): deal with buffers that point to NULL like sendfile */ |
1418 | | |
1419 | | /* Copy and free every chunk that will be entirely pulled into tmp */ |
1420 | 0 | last_with_data = *buf->last_with_datap; |
1421 | 0 | for (; chain != NULL && (size_t)size >= chain->off; chain = next) { Branch (1421:9): [True: 0, False: 0]
Branch (1421:26): [True: 0, False: 0]
|
1422 | 0 | next = chain->next; |
1423 | |
|
1424 | 0 | if (chain->buffer) { Branch (1424:7): [True: 0, False: 0]
|
1425 | 0 | memcpy(buffer, chain->buffer + chain->misalign, chain->off); |
1426 | 0 | size -= chain->off; |
1427 | 0 | buffer += chain->off; |
1428 | 0 | } |
1429 | 0 | if (chain == last_with_data) Branch (1429:7): [True: 0, False: 0]
|
1430 | 0 | removed_last_with_data = 1; |
1431 | 0 | if (&chain->next == buf->last_with_datap) Branch (1431:7): [True: 0, False: 0]
|
1432 | 0 | removed_last_with_datap = 1; |
1433 | |
|
1434 | 0 | evbuffer_chain_free(chain); |
1435 | 0 | } |
1436 | |
|
1437 | 0 | if (chain != NULL) { Branch (1437:6): [True: 0, False: 0]
|
1438 | 0 | memcpy(buffer, chain->buffer + chain->misalign, size); |
1439 | 0 | chain->misalign += size; |
1440 | 0 | chain->off -= size; |
1441 | 0 | } else { |
1442 | 0 | buf->last = tmp; |
1443 | 0 | } |
1444 | |
|
1445 | 0 | tmp->next = chain; |
1446 | |
|
1447 | 0 | if (removed_last_with_data) { Branch (1447:6): [True: 0, False: 0]
|
1448 | 0 | buf->last_with_datap = &buf->first; |
1449 | 0 | } else if (removed_last_with_datap) { Branch (1449:13): [True: 0, False: 0]
|
1450 | 0 | if (buf->first->next && buf->first->next->off) Branch (1450:7): [True: 0, False: 0]
Branch (1450:27): [True: 0, False: 0]
|
1451 | 0 | buf->last_with_datap = &buf->first->next; |
1452 | 0 | else |
1453 | 0 | buf->last_with_datap = &buf->first; |
1454 | 0 | } |
1455 | |
|
1456 | 0 | result = (tmp->buffer + tmp->misalign); |
1457 | |
|
1458 | 2.35M | done: |
1459 | 2.35M | EVBUFFER_UNLOCK(buf); |
1460 | 2.35M | return result; |
1461 | 0 | } |
1462 | | |
1463 | | /* |
1464 | | * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'. |
1465 | | * The returned buffer needs to be freed by the called. |
1466 | | */ |
1467 | | char * |
1468 | | evbuffer_readline(struct evbuffer *buffer) |
1469 | 0 | { |
1470 | 0 | return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY); |
1471 | 0 | } |
1472 | | |
1473 | | static inline ev_ssize_t |
1474 | | evbuffer_strchr(struct evbuffer_ptr *it, const char chr) |
1475 | 14.1M | { |
1476 | 14.1M | struct evbuffer_chain *chain = it->internal_.chain; |
1477 | 14.1M | size_t i = it->internal_.pos_in_chain; |
1478 | 14.1M | while (chain != NULL) { Branch (1478:9): [True: 14.1M, False: 0]
|
1479 | 14.1M | char *buffer = (char *)chain->buffer + chain->misalign; |
1480 | 14.1M | char *cp = memchr(buffer+i, chr, chain->off-i); |
1481 | 14.1M | if (cp) { Branch (1481:7): [True: 14.1M, False: 0]
|
1482 | 14.1M | it->internal_.chain = chain; |
1483 | 14.1M | it->internal_.pos_in_chain = cp - buffer; |
1484 | 14.1M | it->pos += (cp - buffer - i); |
1485 | 14.1M | return it->pos; |
1486 | 14.1M | } |
1487 | 0 | it->pos += chain->off - i; |
1488 | 0 | i = 0; |
1489 | 0 | chain = chain->next; |
1490 | 0 | } |
1491 | | |
1492 | 0 | return (-1); |
1493 | 14.1M | } |
1494 | | |
1495 | | static inline char * |
1496 | | find_eol_char(char *s, size_t len) |
1497 | 0 | { |
1498 | 0 | #define CHUNK_SZ 128 |
1499 | | /* Lots of benchmarking found this approach to be faster in practice |
1500 | | * than doing two memchrs over the whole buffer, doin a memchr on each |
1501 | | * char of the buffer, or trying to emulate memchr by hand. */ |
1502 | 0 | char *s_end, *cr, *lf; |
1503 | 0 | s_end = s+len; |
1504 | 0 | while (s < s_end) { Branch (1504:9): [True: 0, False: 0]
|
1505 | 0 | size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s); Branch (1505:18): [True: 0, False: 0]
|
1506 | 0 | cr = memchr(s, '\r', chunk); |
1507 | 0 | lf = memchr(s, '\n', chunk); |
1508 | 0 | if (cr) { Branch (1508:7): [True: 0, False: 0]
|
1509 | 0 | if (lf && lf < cr) Branch (1509:8): [True: 0, False: 0]
Branch (1509:14): [True: 0, False: 0]
|
1510 | 0 | return lf; |
1511 | 0 | return cr; |
1512 | 0 | } else if (lf) { Branch (1512:14): [True: 0, False: 0]
|
1513 | 0 | return lf; |
1514 | 0 | } |
1515 | 0 | s += CHUNK_SZ; |
1516 | 0 | } |
1517 | | |
1518 | 0 | return NULL; |
1519 | 0 | #undef CHUNK_SZ |
1520 | 0 | } |
1521 | | |
1522 | | static ev_ssize_t |
1523 | | evbuffer_find_eol_char(struct evbuffer_ptr *it) |
1524 | 0 | { |
1525 | 0 | struct evbuffer_chain *chain = it->internal_.chain; |
1526 | 0 | size_t i = it->internal_.pos_in_chain; |
1527 | 0 | while (chain != NULL) { Branch (1527:9): [True: 0, False: 0]
|
1528 | 0 | char *buffer = (char *)chain->buffer + chain->misalign; |
1529 | 0 | char *cp = find_eol_char(buffer+i, chain->off-i); |
1530 | 0 | if (cp) { Branch (1530:7): [True: 0, False: 0]
|
1531 | 0 | it->internal_.chain = chain; |
1532 | 0 | it->internal_.pos_in_chain = cp - buffer; |
1533 | 0 | it->pos += (cp - buffer) - i; |
1534 | 0 | return it->pos; |
1535 | 0 | } |
1536 | 0 | it->pos += chain->off - i; |
1537 | 0 | i = 0; |
1538 | 0 | chain = chain->next; |
1539 | 0 | } |
1540 | | |
1541 | 0 | return (-1); |
1542 | 0 | } |
1543 | | |
1544 | | static inline size_t |
1545 | | evbuffer_strspn( |
1546 | | struct evbuffer_ptr *ptr, const char *chrset) |
1547 | 0 | { |
1548 | 0 | size_t count = 0; |
1549 | 0 | struct evbuffer_chain *chain = ptr->internal_.chain; |
1550 | 0 | size_t i = ptr->internal_.pos_in_chain; |
1551 | |
|
1552 | 0 | if (!chain) Branch (1552:6): [True: 0, False: 0]
|
1553 | 0 | return 0; |
1554 | | |
1555 | 0 | while (1) { Branch (1555:9): [Folded - Ignored]
|
1556 | 0 | char *buffer = (char *)chain->buffer + chain->misalign; |
1557 | 0 | for (; i < chain->off; ++i) { Branch (1557:10): [True: 0, False: 0]
|
1558 | 0 | const char *p = chrset; |
1559 | 0 | while (*p) { Branch (1559:11): [True: 0, False: 0]
|
1560 | 0 | if (buffer[i] == *p++) Branch (1560:9): [True: 0, False: 0]
|
1561 | 0 | goto next; |
1562 | 0 | } |
1563 | 0 | ptr->internal_.chain = chain; |
1564 | 0 | ptr->internal_.pos_in_chain = i; |
1565 | 0 | ptr->pos += count; |
1566 | 0 | return count; |
1567 | 0 | next: |
1568 | 0 | ++count; |
1569 | 0 | } |
1570 | 0 | i = 0; |
1571 | |
|
1572 | 0 | if (! chain->next) { Branch (1572:7): [True: 0, False: 0]
|
1573 | 0 | ptr->internal_.chain = chain; |
1574 | 0 | ptr->internal_.pos_in_chain = i; |
1575 | 0 | ptr->pos += count; |
1576 | 0 | return count; |
1577 | 0 | } |
1578 | | |
1579 | 0 | chain = chain->next; |
1580 | 0 | } |
1581 | 0 | } |
1582 | | |
1583 | | |
1584 | | static inline int |
1585 | | evbuffer_getchr(struct evbuffer_ptr *it) |
1586 | 14.1M | { |
1587 | 14.1M | struct evbuffer_chain *chain = it->internal_.chain; |
1588 | 14.1M | size_t off = it->internal_.pos_in_chain; |
1589 | | |
1590 | 14.1M | if (chain == NULL) Branch (1590:6): [True: 0, False: 14.1M]
|
1591 | 0 | return -1; |
1592 | | |
1593 | 14.1M | return (unsigned char)chain->buffer[chain->misalign + off]; |
1594 | 14.1M | } |
1595 | | |
1596 | | struct evbuffer_ptr |
1597 | | evbuffer_search_eol(struct evbuffer *buffer, |
1598 | | struct evbuffer_ptr *start, size_t *eol_len_out, |
1599 | | enum evbuffer_eol_style eol_style) |
1600 | 14.1M | { |
1601 | 14.1M | struct evbuffer_ptr it, it2; |
1602 | 14.1M | size_t extra_drain = 0; |
1603 | 14.1M | int ok = 0; |
1604 | | |
1605 | | /* Avoid locking in trivial edge cases */ |
1606 | 14.1M | if (start && start->internal_.chain == NULL) { Branch (1606:6): [True: 0, False: 14.1M]
Branch (1606:15): [True: 0, False: 0]
|
1607 | 0 | PTR_NOT_FOUND(&it); |
1608 | 0 | if (eol_len_out) Branch (1608:7): [True: 0, False: 0]
|
1609 | 0 | *eol_len_out = extra_drain; |
1610 | 0 | return it; |
1611 | 0 | } |
1612 | | |
1613 | 14.1M | EVBUFFER_LOCK(buffer); |
1614 | | |
1615 | 14.1M | if (start) { Branch (1615:6): [True: 0, False: 14.1M]
|
1616 | 0 | memcpy(&it, start, sizeof(it)); |
1617 | 14.1M | } else { |
1618 | 14.1M | it.pos = 0; |
1619 | 14.1M | it.internal_.chain = buffer->first; |
1620 | 14.1M | it.internal_.pos_in_chain = 0; |
1621 | 14.1M | } |
1622 | | |
1623 | | /* the eol_style determines our first stop character and how many |
1624 | | * characters we are going to drain afterwards. */ |
1625 | 14.1M | switch (eol_style) { |
1626 | 0 | case EVBUFFER_EOL_ANY: Branch (1626:2): [True: 0, False: 14.1M]
|
1627 | 0 | if (evbuffer_find_eol_char(&it) < 0) Branch (1627:7): [True: 0, False: 0]
|
1628 | 0 | goto done; |
1629 | 0 | memcpy(&it2, &it, sizeof(it)); |
1630 | 0 | extra_drain = evbuffer_strspn(&it2, "\r\n"); |
1631 | 0 | break; |
1632 | 0 | case EVBUFFER_EOL_CRLF_STRICT: { Branch (1632:2): [True: 0, False: 14.1M]
|
1633 | 0 | it = evbuffer_search(buffer, "\r\n", 2, &it); |
1634 | 0 | if (it.pos < 0) Branch (1634:7): [True: 0, False: 0]
|
1635 | 0 | goto done; |
1636 | 0 | extra_drain = 2; |
1637 | 0 | break; |
1638 | 0 | } |
1639 | 14.1M | case EVBUFFER_EOL_CRLF: { Branch (1639:2): [True: 14.1M, False: 0]
|
1640 | 14.1M | ev_ssize_t start_pos = it.pos; |
1641 | | /* Look for a LF ... */ |
1642 | 14.1M | if (evbuffer_strchr(&it, '\n') < 0) Branch (1642:7): [True: 0, False: 14.1M]
|
1643 | 0 | goto done; |
1644 | 14.1M | extra_drain = 1; |
1645 | | /* ... optionally preceeded by a CR. */ |
1646 | 14.1M | if (it.pos == start_pos) Branch (1646:7): [True: 0, False: 14.1M]
|
1647 | 0 | break; /* If the first character is \n, don't back up */ |
1648 | | /* This potentially does an extra linear walk over the first |
1649 | | * few chains. Probably, that's not too expensive unless you |
1650 | | * have a really pathological setup. */ |
1651 | 14.1M | memcpy(&it2, &it, sizeof(it)); |
1652 | 14.1M | if (evbuffer_ptr_subtract(buffer, &it2, 1)<0) Branch (1652:7): [True: 0, False: 14.1M]
|
1653 | 0 | break; |
1654 | 14.1M | if (evbuffer_getchr(&it2) == '\r') { Branch (1654:7): [True: 14.1M, False: 0]
|
1655 | 14.1M | memcpy(&it, &it2, sizeof(it)); |
1656 | 14.1M | extra_drain = 2; |
1657 | 14.1M | } |
1658 | 14.1M | break; |
1659 | 14.1M | } |
1660 | 0 | case EVBUFFER_EOL_LF: Branch (1660:2): [True: 0, False: 14.1M]
|
1661 | 0 | if (evbuffer_strchr(&it, '\n') < 0) Branch (1661:7): [True: 0, False: 0]
|
1662 | 0 | goto done; |
1663 | 0 | extra_drain = 1; |
1664 | 0 | break; |
1665 | 0 | case EVBUFFER_EOL_NUL: Branch (1665:2): [True: 0, False: 14.1M]
|
1666 | 0 | if (evbuffer_strchr(&it, '\0') < 0) Branch (1666:7): [True: 0, False: 0]
|
1667 | 0 | goto done; |
1668 | 0 | extra_drain = 1; |
1669 | 0 | break; |
1670 | 0 | default: Branch (1670:2): [True: 0, False: 14.1M]
|
1671 | 0 | goto done; |
1672 | 14.1M | } |
1673 | | |
1674 | 14.1M | ok = 1; |
1675 | 14.1M | done: |
1676 | 14.1M | EVBUFFER_UNLOCK(buffer); |
1677 | | |
1678 | 14.1M | if (!ok) Branch (1678:6): [True: 0, False: 14.1M]
|
1679 | 0 | PTR_NOT_FOUND(&it); |
1680 | 14.1M | if (eol_len_out) Branch (1680:6): [True: 14.1M, False: 0]
|
1681 | 14.1M | *eol_len_out = extra_drain; |
1682 | | |
1683 | 14.1M | return it; |
1684 | 14.1M | } |
1685 | | |
1686 | | char * |
1687 | | evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out, |
1688 | | enum evbuffer_eol_style eol_style) |
1689 | 14.1M | { |
1690 | 14.1M | struct evbuffer_ptr it; |
1691 | 14.1M | char *line; |
1692 | 14.1M | size_t n_to_copy=0, extra_drain=0; |
1693 | 14.1M | char *result = NULL; |
1694 | | |
1695 | 14.1M | EVBUFFER_LOCK(buffer); |
1696 | | |
1697 | 14.1M | if (buffer->freeze_start) { Branch (1697:6): [True: 0, False: 14.1M]
|
1698 | 0 | goto done; |
1699 | 0 | } |
1700 | | |
1701 | 14.1M | it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style); |
1702 | 14.1M | if (it.pos < 0) Branch (1702:6): [True: 0, False: 14.1M]
|
1703 | 0 | goto done; |
1704 | 14.1M | n_to_copy = it.pos; |
1705 | | |
1706 | 14.1M | if ((line = mm_malloc(n_to_copy+1)) == NULL) { Branch (1706:6): [True: 0, False: 14.1M]
|
1707 | 0 | event_warn("%s: out of memory", __func__); |
1708 | 0 | goto done; |
1709 | 0 | } |
1710 | | |
1711 | 14.1M | evbuffer_remove(buffer, line, n_to_copy); |
1712 | 14.1M | line[n_to_copy] = '\0'; |
1713 | | |
1714 | 14.1M | evbuffer_drain(buffer, extra_drain); |
1715 | 14.1M | result = line; |
1716 | 14.1M | done: |
1717 | 14.1M | EVBUFFER_UNLOCK(buffer); |
1718 | | |
1719 | 14.1M | if (n_read_out) Branch (1719:6): [True: 14.1M, False: 0]
|
1720 | 14.1M | *n_read_out = result ? n_to_copy : 0; Branch (1720:17): [True: 14.1M, False: 0]
|
1721 | | |
1722 | 14.1M | return result; |
1723 | 14.1M | } |
1724 | | |
1725 | 0 | #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096 |
1726 | | |
1727 | | /* Adds data to an event buffer */ |
1728 | | |
1729 | | int |
1730 | | evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen) |
1731 | 4.71M | { |
1732 | 4.71M | struct evbuffer_chain *chain, *tmp; |
1733 | 4.71M | const unsigned char *data = data_in; |
1734 | 4.71M | size_t remain, to_alloc; |
1735 | 4.71M | int result = -1; |
1736 | | |
1737 | 4.71M | EVBUFFER_LOCK(buf); |
1738 | | |
1739 | 4.71M | if (buf->freeze_end) { Branch (1739:6): [True: 0, False: 4.71M]
|
1740 | 0 | goto done; |
1741 | 0 | } |
1742 | | /* Prevent buf->total_len overflow */ |
1743 | 4.71M | if (datlen > EV_SIZE_MAX - buf->total_len) { Branch (1743:6): [True: 0, False: 4.71M]
|
1744 | 0 | goto done; |
1745 | 0 | } |
1746 | | |
1747 | 4.71M | if (*buf->last_with_datap == NULL) { Branch (1747:6): [True: 2.35M, False: 2.35M]
|
1748 | 2.35M | chain = buf->last; |
1749 | 2.35M | } else { |
1750 | 2.35M | chain = *buf->last_with_datap; |
1751 | 2.35M | } |
1752 | | |
1753 | | /* If there are no chains allocated for this buffer, allocate one |
1754 | | * big enough to hold all the data. */ |
1755 | 4.71M | if (chain == NULL) { Branch (1755:6): [True: 2.35M, False: 2.35M]
|
1756 | 2.35M | chain = evbuffer_chain_new(datlen); |
1757 | 2.35M | if (!chain) Branch (1757:7): [True: 0, False: 2.35M]
|
1758 | 0 | goto done; |
1759 | 2.35M | evbuffer_chain_insert(buf, chain); |
1760 | 2.35M | } |
1761 | | |
1762 | 4.71M | if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { Branch (1762:6): [True: 4.71M, False: 0]
|
1763 | | /* Always true for mutable buffers */ |
1764 | 4.71M | EVUTIL_ASSERT(chain->misalign >= 0 && |
1765 | 4.71M | (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX); |
1766 | 4.71M | remain = chain->buffer_len - (size_t)chain->misalign - chain->off; |
1767 | 4.71M | if (remain >= datlen) { Branch (1767:7): [True: 4.71M, False: 0]
|
1768 | | /* there's enough space to hold all the data in the |
1769 | | * current last chain */ |
1770 | 4.71M | memcpy(chain->buffer + chain->misalign + chain->off, |
1771 | 4.71M | data, datlen); |
1772 | 4.71M | chain->off += datlen; |
1773 | 4.71M | buf->total_len += datlen; |
1774 | 4.71M | buf->n_add_for_cb += datlen; |
1775 | 4.71M | goto out; |
1776 | 4.71M | } else if (!CHAIN_PINNED(chain) && Branch (1776:14): [True: 0, False: 0]
|
1777 | 0 | evbuffer_chain_should_realign(chain, datlen)) { Branch (1777:7): [True: 0, False: 0]
|
1778 | | /* we can fit the data into the misalignment */ |
1779 | 0 | evbuffer_chain_align(chain); |
1780 | |
|
1781 | 0 | memcpy(chain->buffer + chain->off, data, datlen); |
1782 | 0 | chain->off += datlen; |
1783 | 0 | buf->total_len += datlen; |
1784 | 0 | buf->n_add_for_cb += datlen; |
1785 | 0 | goto out; |
1786 | 0 | } |
1787 | 4.71M | } else { |
1788 | | /* we cannot write any data to the last chain */ |
1789 | 0 | remain = 0; |
1790 | 0 | } |
1791 | | |
1792 | | /* we need to add another chain */ |
1793 | 0 | to_alloc = chain->buffer_len; |
1794 | 0 | if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2) Branch (1794:6): [True: 0, False: 0]
|
1795 | 0 | to_alloc <<= 1; |
1796 | 0 | if (datlen > to_alloc) Branch (1796:6): [True: 0, False: 0]
|
1797 | 0 | to_alloc = datlen; |
1798 | 0 | tmp = evbuffer_chain_new(to_alloc); |
1799 | 0 | if (tmp == NULL) Branch (1799:6): [True: 0, False: 0]
|
1800 | 0 | goto done; |
1801 | | |
1802 | 0 | if (remain) { Branch (1802:6): [True: 0, False: 0]
|
1803 | 0 | memcpy(chain->buffer + chain->misalign + chain->off, |
1804 | 0 | data, remain); |
1805 | 0 | chain->off += remain; |
1806 | 0 | buf->total_len += remain; |
1807 | 0 | buf->n_add_for_cb += remain; |
1808 | 0 | } |
1809 | |
|
1810 | 0 | data += remain; |
1811 | 0 | datlen -= remain; |
1812 | |
|
1813 | 0 | memcpy(tmp->buffer, data, datlen); |
1814 | 0 | tmp->off = datlen; |
1815 | 0 | evbuffer_chain_insert(buf, tmp); |
1816 | 0 | buf->n_add_for_cb += datlen; |
1817 | |
|
1818 | 4.71M | out: |
1819 | 4.71M | evbuffer_invoke_callbacks_(buf); |
1820 | 4.71M | result = 0; |
1821 | 4.71M | done: |
1822 | 4.71M | EVBUFFER_UNLOCK(buf); |
1823 | 4.71M | return result; |
1824 | 4.71M | } |
1825 | | |
1826 | | int |
1827 | | evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen) |
1828 | 0 | { |
1829 | 0 | struct evbuffer_chain *chain, *tmp; |
1830 | 0 | int result = -1; |
1831 | |
|
1832 | 0 | EVBUFFER_LOCK(buf); |
1833 | |
|
1834 | 0 | if (datlen == 0) { Branch (1834:6): [True: 0, False: 0]
|
1835 | 0 | result = 0; |
1836 | 0 | goto done; |
1837 | 0 | } |
1838 | 0 | if (buf->freeze_start) { Branch (1838:6): [True: 0, False: 0]
|
1839 | 0 | goto done; |
1840 | 0 | } |
1841 | 0 | if (datlen > EV_SIZE_MAX - buf->total_len) { Branch (1841:6): [True: 0, False: 0]
|
1842 | 0 | goto done; |
1843 | 0 | } |
1844 | | |
1845 | 0 | chain = buf->first; |
1846 | |
|
1847 | 0 | if (chain == NULL) { Branch (1847:6): [True: 0, False: 0]
|
1848 | 0 | chain = evbuffer_chain_new(datlen); |
1849 | 0 | if (!chain) Branch (1849:7): [True: 0, False: 0]
|
1850 | 0 | goto done; |
1851 | 0 | evbuffer_chain_insert(buf, chain); |
1852 | 0 | } |
1853 | | |
1854 | | /* we cannot touch immutable buffers */ |
1855 | 0 | if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { Branch (1855:6): [True: 0, False: 0]
|
1856 | | /* Always true for mutable buffers */ |
1857 | 0 | EVUTIL_ASSERT(chain->misalign >= 0 && |
1858 | 0 | (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX); |
1859 | | |
1860 | | /* If this chain is empty, we can treat it as |
1861 | | * 'empty at the beginning' rather than 'empty at the end' */ |
1862 | 0 | if (chain->off == 0) Branch (1862:7): [True: 0, False: 0]
|
1863 | 0 | chain->misalign = chain->buffer_len; |
1864 | |
|
1865 | 0 | if ((size_t)chain->misalign >= datlen) { Branch (1865:7): [True: 0, False: 0]
|
1866 | | /* we have enough space to fit everything */ |
1867 | 0 | memcpy(chain->buffer + chain->misalign - datlen, |
1868 | 0 | data, datlen); |
1869 | 0 | chain->off += datlen; |
1870 | 0 | chain->misalign -= datlen; |
1871 | 0 | buf->total_len += datlen; |
1872 | 0 | buf->n_add_for_cb += datlen; |
1873 | 0 | goto out; |
1874 | 0 | } else if (chain->misalign) { Branch (1874:14): [True: 0, False: 0]
|
1875 | | /* we can only fit some of the data. */ |
1876 | 0 | memcpy(chain->buffer, |
1877 | 0 | (char*)data + datlen - chain->misalign, |
1878 | 0 | (size_t)chain->misalign); |
1879 | 0 | chain->off += (size_t)chain->misalign; |
1880 | 0 | buf->total_len += (size_t)chain->misalign; |
1881 | 0 | buf->n_add_for_cb += (size_t)chain->misalign; |
1882 | 0 | datlen -= (size_t)chain->misalign; |
1883 | 0 | chain->misalign = 0; |
1884 | 0 | } |
1885 | 0 | } |
1886 | | |
1887 | | /* we need to add another chain */ |
1888 | 0 | if ((tmp = evbuffer_chain_new(datlen)) == NULL) Branch (1888:6): [True: 0, False: 0]
|
1889 | 0 | goto done; |
1890 | 0 | buf->first = tmp; |
1891 | 0 | if (buf->last_with_datap == &buf->first && chain->off) Branch (1891:6): [True: 0, False: 0]
Branch (1891:45): [True: 0, False: 0]
|
1892 | 0 | buf->last_with_datap = &tmp->next; |
1893 | |
|
1894 | 0 | tmp->next = chain; |
1895 | |
|
1896 | 0 | tmp->off = datlen; |
1897 | 0 | EVUTIL_ASSERT(datlen <= tmp->buffer_len); |
1898 | 0 | tmp->misalign = tmp->buffer_len - datlen; |
1899 | |
|
1900 | 0 | memcpy(tmp->buffer + tmp->misalign, data, datlen); |
1901 | 0 | buf->total_len += datlen; |
1902 | 0 | buf->n_add_for_cb += datlen; |
1903 | |
|
1904 | 0 | out: |
1905 | 0 | evbuffer_invoke_callbacks_(buf); |
1906 | 0 | result = 0; |
1907 | 0 | done: |
1908 | 0 | EVBUFFER_UNLOCK(buf); |
1909 | 0 | return result; |
1910 | 0 | } |
1911 | | |
1912 | | /** Helper: realigns the memory in chain->buffer so that misalign is 0. */ |
1913 | | static void |
1914 | | evbuffer_chain_align(struct evbuffer_chain *chain) |
1915 | 0 | { |
1916 | 0 | EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); |
1917 | 0 | EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); |
1918 | 0 | memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); |
1919 | 0 | chain->misalign = 0; |
1920 | 0 | } |
1921 | | |
1922 | 0 | #define MAX_TO_COPY_IN_EXPAND 4096 |
1923 | 0 | #define MAX_TO_REALIGN_IN_EXPAND 2048 |
1924 | | |
1925 | | /** Helper: return true iff we should realign chain to fit datalen bytes of |
1926 | | data in it. */ |
1927 | | static int |
1928 | | evbuffer_chain_should_realign(struct evbuffer_chain *chain, |
1929 | | size_t datlen) |
1930 | 0 | { |
1931 | 0 | return chain->buffer_len - chain->off >= datlen && Branch (1931:9): [True: 0, False: 0]
|
1932 | 0 | (chain->off < chain->buffer_len / 2) && Branch (1932:6): [True: 0, False: 0]
|
1933 | 0 | (chain->off <= MAX_TO_REALIGN_IN_EXPAND); Branch (1933:6): [True: 0, False: 0]
|
1934 | 0 | } |
1935 | | |
1936 | | /* Expands the available space in the event buffer to at least datlen, all in |
1937 | | * a single chunk. Return that chunk. */ |
1938 | | static struct evbuffer_chain * |
1939 | | evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen) |
1940 | 9.44M | { |
1941 | 9.44M | struct evbuffer_chain *chain, **chainp; |
1942 | 9.44M | struct evbuffer_chain *result = NULL; |
1943 | 9.44M | ASSERT_EVBUFFER_LOCKED(buf); |
1944 | | |
1945 | 9.44M | chainp = buf->last_with_datap; |
1946 | | |
1947 | | /* XXX If *chainp is no longer writeable, but has enough space in its |
1948 | | * misalign, this might be a bad idea: we could still use *chainp, not |
1949 | | * (*chainp)->next. */ |
1950 | 9.44M | if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0) Branch (1950:6): [True: 7.08M, False: 2.35M]
Branch (1950:17): [True: 0, False: 7.08M]
|
1951 | 0 | chainp = &(*chainp)->next; |
1952 | | |
1953 | | /* 'chain' now points to the first chain with writable space (if any) |
1954 | | * We will either use it, realign it, replace it, or resize it. */ |
1955 | 9.44M | chain = *chainp; |
1956 | | |
1957 | 9.44M | if (chain == NULL || Branch (1957:6): [True: 2.35M, False: 7.08M]
|
1958 | 9.44M | (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { Branch (1958:6): [True: 0, False: 7.08M]
|
1959 | | /* We can't use the last_with_data chain at all. Just add a |
1960 | | * new one that's big enough. */ |
1961 | 2.35M | goto insert_new; |
1962 | 2.35M | } |
1963 | | |
1964 | | /* If we can fit all the data, then we don't have to do anything */ |
1965 | 7.08M | if (CHAIN_SPACE_LEN(chain) >= datlen) { Branch (1965:6): [True: 7.08M, False: 0]
|
1966 | 7.08M | result = chain; |
1967 | 7.08M | goto ok; |
1968 | 7.08M | } |
1969 | | |
1970 | | /* If the chain is completely empty, just replace it by adding a new |
1971 | | * empty chain. */ |
1972 | 0 | if (chain->off == 0) { Branch (1972:6): [True: 0, False: 0]
|
1973 | 0 | goto insert_new; |
1974 | 0 | } |
1975 | | |
1976 | | /* If the misalignment plus the remaining space fulfills our data |
1977 | | * needs, we could just force an alignment to happen. Afterwards, we |
1978 | | * have enough space. But only do this if we're saving a lot of space |
1979 | | * and not moving too much data. Otherwise the space savings are |
1980 | | * probably offset by the time lost in copying. |
1981 | | */ |
1982 | 0 | if (evbuffer_chain_should_realign(chain, datlen)) { Branch (1982:6): [True: 0, False: 0]
|
1983 | 0 | evbuffer_chain_align(chain); |
1984 | 0 | result = chain; |
1985 | 0 | goto ok; |
1986 | 0 | } |
1987 | | |
1988 | | /* At this point, we can either resize the last chunk with space in |
1989 | | * it, use the next chunk after it, or If we add a new chunk, we waste |
1990 | | * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. If we |
1991 | | * resize, we have to copy chain->off bytes. |
1992 | | */ |
1993 | | |
1994 | | /* Would expanding this chunk be affordable and worthwhile? */ |
1995 | 0 | if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 || Branch (1995:6): [True: 0, False: 0]
|
1996 | 0 | chain->off > MAX_TO_COPY_IN_EXPAND || Branch (1996:6): [True: 0, False: 0]
|
1997 | 0 | datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) { Branch (1997:3): [True: 0, False: 0]
|
1998 | | /* It's not worth resizing this chain. Can the next one be |
1999 | | * used? */ |
2000 | 0 | if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { Branch (2000:7): [True: 0, False: 0]
Branch (2000:22): [True: 0, False: 0]
|
2001 | | /* Yes, we can just use the next chain (which should |
2002 | | * be empty. */ |
2003 | 0 | result = chain->next; |
2004 | 0 | goto ok; |
2005 | 0 | } else { |
2006 | | /* No; append a new chain (which will free all |
2007 | | * terminal empty chains.) */ |
2008 | 0 | goto insert_new; |
2009 | 0 | } |
2010 | 0 | } else { |
2011 | | /* Okay, we're going to try to resize this chain: Not doing so |
2012 | | * would waste at least 1/8 of its current allocation, and we |
2013 | | * can do so without having to copy more than |
2014 | | * MAX_TO_COPY_IN_EXPAND bytes. */ |
2015 | | /* figure out how much space we need */ |
2016 | 0 | size_t length = chain->off + datlen; |
2017 | 0 | struct evbuffer_chain *tmp = evbuffer_chain_new(length); |
2018 | 0 | if (tmp == NULL) Branch (2018:7): [True: 0, False: 0]
|
2019 | 0 | goto err; |
2020 | | |
2021 | | /* copy the data over that we had so far */ |
2022 | 0 | tmp->off = chain->off; |
2023 | 0 | memcpy(tmp->buffer, chain->buffer + chain->misalign, |
2024 | 0 | chain->off); |
2025 | | /* fix up the list */ |
2026 | 0 | EVUTIL_ASSERT(*chainp == chain); |
2027 | 0 | result = *chainp = tmp; |
2028 | |
|
2029 | 0 | if (buf->last == chain) Branch (2029:7): [True: 0, False: 0]
|
2030 | 0 | buf->last = tmp; |
2031 | |
|
2032 | 0 | tmp->next = chain->next; |
2033 | 0 | evbuffer_chain_free(chain); |
2034 | 0 | goto ok; |
2035 | 0 | } |
2036 | | |
2037 | 2.35M | insert_new: |
2038 | 2.35M | result = evbuffer_chain_insert_new(buf, datlen); |
2039 | 2.35M | if (!result) Branch (2039:6): [True: 0, False: 2.35M]
|
2040 | 0 | goto err; |
2041 | 9.44M | ok: |
2042 | 9.44M | EVUTIL_ASSERT(result); |
2043 | 9.44M | EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen); |
2044 | 9.44M | err: |
2045 | 9.44M | return result; |
2046 | 9.44M | } |
2047 | | |
2048 | | /* Make sure that datlen bytes are available for writing in the last n |
2049 | | * chains. Never copies or moves data. */ |
2050 | | int |
2051 | | evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n) |
2052 | 4.70M | { |
2053 | 4.70M | struct evbuffer_chain *chain = buf->last, *tmp, *next; |
2054 | 4.70M | size_t avail; |
2055 | 4.70M | int used; |
2056 | | |
2057 | 4.70M | ASSERT_EVBUFFER_LOCKED(buf); |
2058 | 4.70M | EVUTIL_ASSERT(n >= 2); |
2059 | | |
2060 | 4.70M | if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { Branch (2060:6): [True: 4.70M, False: 0]
Branch (2060:23): [True: 0, False: 0]
|
2061 | | /* There is no last chunk, or we can't touch the last chunk. |
2062 | | * Just add a new chunk. */ |
2063 | 4.70M | chain = evbuffer_chain_new(datlen); |
2064 | 4.70M | if (chain == NULL) Branch (2064:7): [True: 0, False: 4.70M]
|
2065 | 0 | return (-1); |
2066 | | |
2067 | 4.70M | evbuffer_chain_insert(buf, chain); |
2068 | 4.70M | return (0); |
2069 | 4.70M | } |
2070 | | |
2071 | 0 | used = 0; /* number of chains we're using space in. */ |
2072 | 0 | avail = 0; /* how much space they have. */ |
2073 | | /* How many bytes can we stick at the end of buffer as it is? Iterate |
2074 | | * over the chains at the end of the buffer, tring to see how much |
2075 | | * space we have in the first n. */ |
2076 | 0 | for (chain = *buf->last_with_datap; chain; chain = chain->next) { Branch (2076:38): [True: 0, False: 0]
|
2077 | 0 | if (chain->off) { Branch (2077:7): [True: 0, False: 0]
|
2078 | 0 | size_t space = (size_t) CHAIN_SPACE_LEN(chain); |
2079 | 0 | EVUTIL_ASSERT(chain == *buf->last_with_datap); |
2080 | 0 | if (space) { Branch (2080:8): [True: 0, False: 0]
|
2081 | 0 | avail += space; |
2082 | 0 | ++used; |
2083 | 0 | } |
2084 | 0 | } else { |
2085 | | /* No data in chain; realign it. */ |
2086 | 0 | chain->misalign = 0; |
2087 | 0 | avail += chain->buffer_len; |
2088 | 0 | ++used; |
2089 | 0 | } |
2090 | 0 | if (avail >= datlen) { Branch (2090:7): [True: 0, False: 0]
|
2091 | | /* There is already enough space. Just return */ |
2092 | 0 | return (0); |
2093 | 0 | } |
2094 | 0 | if (used == n) Branch (2094:7): [True: 0, False: 0]
|
2095 | 0 | break; |
2096 | 0 | } |
2097 | | |
2098 | | /* There wasn't enough space in the first n chains with space in |
2099 | | * them. Either add a new chain with enough space, or replace all |
2100 | | * empty chains with one that has enough space, depending on n. */ |
2101 | 0 | if (used < n) { Branch (2101:6): [True: 0, False: 0]
|
2102 | | /* The loop ran off the end of the chains before it hit n |
2103 | | * chains; we can add another. */ |
2104 | 0 | EVUTIL_ASSERT(chain == NULL); |
2105 | | |
2106 | 0 | tmp = evbuffer_chain_new(datlen - avail); |
2107 | 0 | if (tmp == NULL) Branch (2107:7): [True: 0, False: 0]
|
2108 | 0 | return (-1); |
2109 | | |
2110 | 0 | buf->last->next = tmp; |
2111 | 0 | buf->last = tmp; |
2112 | | /* (we would only set last_with_data if we added the first |
2113 | | * chain. But if the buffer had no chains, we would have |
2114 | | * just allocated a new chain earlier) */ |
2115 | 0 | return (0); |
2116 | 0 | } else { |
2117 | | /* Nuke _all_ the empty chains. */ |
2118 | 0 | int rmv_all = 0; /* True iff we removed last_with_data. */ |
2119 | 0 | chain = *buf->last_with_datap; |
2120 | 0 | if (!chain->off) { Branch (2120:7): [True: 0, False: 0]
|
2121 | 0 | EVUTIL_ASSERT(chain == buf->first); |
2122 | 0 | rmv_all = 1; |
2123 | 0 | avail = 0; |
2124 | 0 | } else { |
2125 | | /* can't overflow, since only mutable chains have |
2126 | | * huge misaligns. */ |
2127 | 0 | avail = (size_t) CHAIN_SPACE_LEN(chain); |
2128 | 0 | chain = chain->next; |
2129 | 0 | } |
2130 | | |
2131 | | |
2132 | 0 | for (; chain; chain = next) { Branch (2132:10): [True: 0, False: 0]
|
2133 | 0 | next = chain->next; |
2134 | 0 | EVUTIL_ASSERT(chain->off == 0); |
2135 | 0 | evbuffer_chain_free(chain); |
2136 | 0 | } |
2137 | 0 | EVUTIL_ASSERT(datlen >= avail); |
2138 | 0 | tmp = evbuffer_chain_new(datlen - avail); |
2139 | 0 | if (tmp == NULL) { Branch (2139:7): [True: 0, False: 0]
|
2140 | 0 | if (rmv_all) { Branch (2140:8): [True: 0, False: 0]
|
2141 | 0 | ZERO_CHAIN(buf); |
2142 | 0 | } else { |
2143 | 0 | buf->last = *buf->last_with_datap; |
2144 | 0 | (*buf->last_with_datap)->next = NULL; |
2145 | 0 | } |
2146 | 0 | return (-1); |
2147 | 0 | } |
2148 | | |
2149 | 0 | if (rmv_all) { Branch (2149:7): [True: 0, False: 0]
|
2150 | 0 | buf->first = buf->last = tmp; |
2151 | 0 | buf->last_with_datap = &buf->first; |
2152 | 0 | } else { |
2153 | 0 | (*buf->last_with_datap)->next = tmp; |
2154 | 0 | buf->last = tmp; |
2155 | 0 | } |
2156 | 0 | return (0); |
2157 | 0 | } |
2158 | 0 | } |
2159 | | |
2160 | | int |
2161 | | evbuffer_expand(struct evbuffer *buf, size_t datlen) |
2162 | 0 | { |
2163 | 0 | struct evbuffer_chain *chain; |
2164 | |
|
2165 | 0 | EVBUFFER_LOCK(buf); |
2166 | 0 | chain = evbuffer_expand_singlechain(buf, datlen); |
2167 | 0 | EVBUFFER_UNLOCK(buf); |
2168 | 0 | return chain ? 0 : -1; Branch (2168:9): [True: 0, False: 0]
|
2169 | 0 | } |
2170 | | |
2171 | | /* |
2172 | | * Reads data from a file descriptor into a buffer. |
2173 | | */ |
2174 | | |
2175 | | #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32) |
2176 | | #define USE_IOVEC_IMPL |
2177 | | #endif |
2178 | | |
2179 | | #ifdef USE_IOVEC_IMPL |
2180 | | |
2181 | | #ifdef EVENT__HAVE_SYS_UIO_H |
2182 | | /* number of iovec we use for writev, fragmentation is going to determine |
2183 | | * how much we end up writing */ |
2184 | | |
2185 | 11.7M | #define DEFAULT_WRITE_IOVEC 128 |
2186 | | |
2187 | | #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC |
2188 | | #define NUM_WRITE_IOVEC UIO_MAXIOV |
2189 | | #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC |
2190 | | #define NUM_WRITE_IOVEC IOV_MAX |
2191 | | #else |
2192 | 4.71M | #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC |
2193 | | #endif |
2194 | | |
2195 | 7.06M | #define IOV_TYPE struct iovec |
2196 | 4.71M | #define IOV_PTR_FIELD iov_base |
2197 | 4.71M | #define IOV_LEN_FIELD iov_len |
2198 | | #define IOV_LEN_TYPE size_t |
2199 | | #else |
2200 | | #define NUM_WRITE_IOVEC 16 |
2201 | | #define IOV_TYPE WSABUF |
2202 | | #define IOV_PTR_FIELD buf |
2203 | | #define IOV_LEN_FIELD len |
2204 | | #define IOV_LEN_TYPE unsigned long |
2205 | | #endif |
2206 | | #endif |
2207 | 9.41M | #define NUM_READ_IOVEC 4 |
2208 | | |
2209 | 9.41M | #define EVBUFFER_MAX_READ 4096 |
2210 | | |
2211 | | /** Helper function to figure out which space to use for reading data into |
2212 | | an evbuffer. Internal use only. |
2213 | | |
2214 | | @param buf The buffer to read into |
2215 | | @param howmuch How much we want to read. |
2216 | | @param vecs An array of two or more iovecs or WSABUFs. |
2217 | | @param n_vecs_avail The length of vecs |
2218 | | @param chainp A pointer to a variable to hold the first chain we're |
2219 | | reading into. |
2220 | | @param exact Boolean: if true, we do not provide more than 'howmuch' |
2221 | | space in the vectors, even if more space is available. |
2222 | | @return The number of buffers we're using. |
2223 | | */ |
2224 | | int |
2225 | | evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, |
2226 | | struct evbuffer_iovec *vecs, int n_vecs_avail, |
2227 | | struct evbuffer_chain ***chainp, int exact) |
2228 | 4.70M | { |
2229 | 4.70M | struct evbuffer_chain *chain; |
2230 | 4.70M | struct evbuffer_chain **firstchainp; |
2231 | 4.70M | size_t so_far; |
2232 | 4.70M | int i; |
2233 | 4.70M | ASSERT_EVBUFFER_LOCKED(buf); |
2234 | | |
2235 | 4.70M | if (howmuch < 0) Branch (2235:6): [True: 0, False: 4.70M]
|
2236 | 0 | return -1; |
2237 | | |
2238 | 4.70M | so_far = 0; |
2239 | | /* Let firstchain be the first chain with any space on it */ |
2240 | 4.70M | firstchainp = buf->last_with_datap; |
2241 | 4.70M | EVUTIL_ASSERT(*firstchainp); |
2242 | 4.70M | if (CHAIN_SPACE_LEN(*firstchainp) == 0) { Branch (2242:6): [True: 0, False: 4.70M]
|
2243 | 0 | firstchainp = &(*firstchainp)->next; |
2244 | 0 | } |
2245 | | |
2246 | 4.70M | chain = *firstchainp; |
2247 | 4.70M | EVUTIL_ASSERT(chain); |
2248 | 9.41M | for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { Branch (2248:14): [True: 9.41M, False: 0]
Branch (2248:34): [True: 4.70M, False: 4.70M]
|
2249 | 4.70M | size_t avail = (size_t) CHAIN_SPACE_LEN(chain); |
2250 | 4.70M | if (avail > (howmuch - so_far) && exact) Branch (2250:7): [True: 4.70M, False: 0]
Branch (2250:37): [True: 4.70M, False: 0]
|
2251 | 4.70M | avail = howmuch - so_far; |
2252 | 4.70M | vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain); |
2253 | 4.70M | vecs[i].iov_len = avail; |
2254 | 4.70M | so_far += avail; |
2255 | 4.70M | chain = chain->next; |
2256 | 4.70M | } |
2257 | | |
2258 | 4.70M | *chainp = firstchainp; |
2259 | 4.70M | return i; |
2260 | 4.70M | } |
2261 | | |
2262 | | static int |
2263 | | get_n_bytes_readable_on_socket(evutil_socket_t fd) |
2264 | 4.70M | { |
2265 | | #if defined(FIONREAD) && defined(_WIN32) |
2266 | | unsigned long lng = EVBUFFER_MAX_READ; |
2267 | | if (ioctlsocket(fd, FIONREAD, &lng) < 0) |
2268 | | return -1; |
2269 | | /* Can overflow, but mostly harmlessly. XXXX */ |
2270 | | return (int)lng; |
2271 | | #elif defined(FIONREAD) |
2272 | 4.70M | int n = EVBUFFER_MAX_READ; |
2273 | 4.70M | if (ioctl(fd, FIONREAD, &n) < 0) Branch (2273:6): [True: 0, False: 4.70M]
|
2274 | 0 | return -1; |
2275 | 4.70M | return n; |
2276 | | #else |
2277 | | return EVBUFFER_MAX_READ; |
2278 | | #endif |
2279 | 4.70M | } |
2280 | | |
2281 | | /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t |
2282 | | * as howmuch? */ |
2283 | | int |
2284 | | evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) |
2285 | 4.70M | { |
2286 | 4.70M | struct evbuffer_chain **chainp; |
2287 | 4.70M | int n; |
2288 | 4.70M | int result; |
2289 | | |
2290 | 4.70M | #ifdef USE_IOVEC_IMPL |
2291 | 4.70M | int nvecs, i, remaining; |
2292 | | #else |
2293 | | struct evbuffer_chain *chain; |
2294 | | unsigned char *p; |
2295 | | #endif |
2296 | | |
2297 | 4.70M | EVBUFFER_LOCK(buf); |
2298 | | |
2299 | 4.70M | if (buf->freeze_end) { Branch (2299:6): [True: 0, False: 4.70M]
|
2300 | 0 | result = -1; |
2301 | 0 | goto done; |
2302 | 0 | } |
2303 | | |
2304 | 4.70M | n = get_n_bytes_readable_on_socket(fd); |
2305 | 4.70M | if (n <= 0 || n > EVBUFFER_MAX_READ) Branch (2305:6): [True: 2.34M, False: 2.35M]
Branch (2305:16): [True: 0, False: 2.35M]
|
2306 | 2.34M | n = EVBUFFER_MAX_READ; |
2307 | 4.70M | if (howmuch < 0 || howmuch > n) Branch (2307:6): [True: 0, False: 4.70M]
Branch (2307:21): [True: 4.70M, False: 0]
|
2308 | 4.70M | howmuch = n; |
2309 | | |
2310 | 4.70M | #ifdef USE_IOVEC_IMPL |
2311 | | /* Since we can use iovecs, we're willing to use the last |
2312 | | * NUM_READ_IOVEC chains. */ |
2313 | 4.70M | if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { Branch (2313:6): [True: 0, False: 4.70M]
|
2314 | 0 | result = -1; |
2315 | 0 | goto done; |
2316 | 4.70M | } else { |
2317 | 4.70M | IOV_TYPE vecs[NUM_READ_IOVEC]; |
2318 | 4.70M | #ifdef EVBUFFER_IOVEC_IS_NATIVE_ |
2319 | 4.70M | nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, |
2320 | 4.70M | NUM_READ_IOVEC, &chainp, 1); |
2321 | | #else |
2322 | | /* We aren't using the native struct iovec. Therefore, |
2323 | | we are on win32. */ |
2324 | | struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; |
2325 | | nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, |
2326 | | &chainp, 1); |
2327 | | |
2328 | | for (i=0; i < nvecs; ++i) |
2329 | | WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); |
2330 | | #endif |
2331 | | |
2332 | | #ifdef _WIN32 |
2333 | | { |
2334 | | DWORD bytesRead; |
2335 | | DWORD flags=0; |
2336 | | if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { |
2337 | | /* The read failed. It might be a close, |
2338 | | * or it might be an error. */ |
2339 | | if (WSAGetLastError() == WSAECONNABORTED) |
2340 | | n = 0; |
2341 | | else |
2342 | | n = -1; |
2343 | | } else |
2344 | | n = bytesRead; |
2345 | | } |
2346 | | #else |
2347 | 4.70M | n = readv(fd, vecs, nvecs); |
2348 | 4.70M | #endif |
2349 | 4.70M | } |
2350 | | |
2351 | | #else /*!USE_IOVEC_IMPL*/ |
2352 | | /* If we don't have FIONREAD, we might waste some space here */ |
2353 | | /* XXX we _will_ waste some space here if there is any space left |
2354 | | * over on buf->last. */ |
2355 | | if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { |
2356 | | result = -1; |
2357 | | goto done; |
2358 | | } |
2359 | | |
2360 | | /* We can append new data at this point */ |
2361 | | p = chain->buffer + chain->misalign + chain->off; |
2362 | | |
2363 | | #ifndef _WIN32 |
2364 | | n = read(fd, p, howmuch); |
2365 | | #else |
2366 | | n = recv(fd, p, howmuch, 0); |
2367 | | #endif |
2368 | | #endif /* USE_IOVEC_IMPL */ |
2369 | | |
2370 | 4.70M | if (n == -1) { Branch (2370:6): [True: 0, False: 4.70M]
|
2371 | 0 | result = -1; |
2372 | 0 | goto done; |
2373 | 0 | } |
2374 | 4.70M | if (n == 0) { Branch (2374:6): [True: 2.34M, False: 2.35M]
|
2375 | 2.34M | result = 0; |
2376 | 2.34M | goto done; |
2377 | 2.34M | } |
2378 | | |
2379 | 2.35M | #ifdef USE_IOVEC_IMPL |
2380 | 2.35M | remaining = n; |
2381 | 2.35M | for (i=0; i < nvecs; ++i) { Branch (2381:12): [True: 2.35M, False: 0]
|
2382 | | /* can't overflow, since only mutable chains have |
2383 | | * huge misaligns. */ |
2384 | 2.35M | size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); |
2385 | | /* XXXX This is a kludge that can waste space in perverse |
2386 | | * situations. */ |
2387 | 2.35M | if (space > EVBUFFER_CHAIN_MAX) Branch (2387:7): [True: 0, False: 2.35M]
|
2388 | 0 | space = EVBUFFER_CHAIN_MAX; |
2389 | 2.35M | if ((ev_ssize_t)space < remaining) { Branch (2389:7): [True: 0, False: 2.35M]
|
2390 | 0 | (*chainp)->off += space; |
2391 | 0 | remaining -= (int)space; |
2392 | 2.35M | } else { |
2393 | 2.35M | (*chainp)->off += remaining; |
2394 | 2.35M | buf->last_with_datap = chainp; |
2395 | 2.35M | break; |
2396 | 2.35M | } |
2397 | 0 | chainp = &(*chainp)->next; |
2398 | 0 | } |
2399 | | #else |
2400 | | chain->off += n; |
2401 | | advance_last_with_data(buf); |
2402 | | #endif |
2403 | 2.35M | buf->total_len += n; |
2404 | 2.35M | buf->n_add_for_cb += n; |
2405 | | |
2406 | | /* Tell someone about changes in this buffer */ |
2407 | 2.35M | evbuffer_invoke_callbacks_(buf); |
2408 | 2.35M | result = n; |
2409 | 4.70M | done: |
2410 | 4.70M | EVBUFFER_UNLOCK(buf); |
2411 | 4.70M | return result; |
2412 | 2.35M | } |
2413 | | |
2414 | | #ifdef USE_IOVEC_IMPL |
2415 | | static inline int |
2416 | | evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, |
2417 | | ev_ssize_t howmuch) |
2418 | 2.35M | { |
2419 | 2.35M | IOV_TYPE iov[NUM_WRITE_IOVEC]; |
2420 | 2.35M | struct evbuffer_chain *chain = buffer->first; |
2421 | 2.35M | int n, i = 0; |
2422 | | |
2423 | 2.35M | if (howmuch < 0) Branch (2423:6): [True: 0, False: 2.35M]
|
2424 | 0 | return -1; |
2425 | | |
2426 | 2.35M | ASSERT_EVBUFFER_LOCKED(buffer); |
2427 | | /* XXX make this top out at some maximal data length? if the |
2428 | | * buffer has (say) 1MB in it, split over 128 chains, there's |
2429 | | * no way it all gets written in one go. */ |
2430 | 7.07M | while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { Branch (2430:9): [True: 4.71M, False: 2.35M]
Branch (2430:26): [True: 4.71M, False: 0]
Branch (2430:49): [True: 4.71M, False: 0]
|
2431 | 4.71M | #ifdef USE_SENDFILE |
2432 | | /* we cannot write the file info via writev */ |
2433 | 4.71M | if (chain->flags & EVBUFFER_SENDFILE) Branch (2433:7): [True: 0, False: 4.71M]
|
2434 | 0 | break; |
2435 | 4.71M | #endif |
2436 | 4.71M | iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); |
2437 | 4.71M | if ((size_t)howmuch >= chain->off) { Branch (2437:7): [True: 4.71M, False: 0]
|
2438 | | /* XXXcould be problematic when windows supports mmap*/ |
2439 | 4.71M | iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; |
2440 | 4.71M | howmuch -= chain->off; |
2441 | 4.71M | } else { |
2442 | | /* XXXcould be problematic when windows supports mmap*/ |
2443 | 0 | iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; |
2444 | 0 | break; |
2445 | 0 | } |
2446 | 4.71M | chain = chain->next; |
2447 | 4.71M | } |
2448 | 2.35M | if (! i) Branch (2448:6): [True: 0, False: 2.35M]
|
2449 | 0 | return 0; |
2450 | | |
2451 | | #ifdef _WIN32 |
2452 | | { |
2453 | | DWORD bytesSent; |
2454 | | if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) |
2455 | | n = -1; |
2456 | | else |
2457 | | n = bytesSent; |
2458 | | } |
2459 | | #else |
2460 | 2.35M | n = writev(fd, iov, i); |
2461 | 2.35M | #endif |
2462 | 2.35M | return (n); |
2463 | 2.35M | } |
2464 | | #endif |
2465 | | |
2466 | | #ifdef USE_SENDFILE |
2467 | | static inline int |
2468 | | evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, |
2469 | | ev_ssize_t howmuch) |
2470 | 0 | { |
2471 | 0 | struct evbuffer_chain *chain = buffer->first; |
2472 | 0 | struct evbuffer_chain_file_segment *info = |
2473 | 0 | EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, |
2474 | 0 | chain); |
2475 | 0 | const int source_fd = info->segment->fd; |
2476 | | #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) |
2477 | | int res; |
2478 | | ev_off_t len = chain->off; |
2479 | | #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) |
2480 | 0 | ev_ssize_t res; |
2481 | 0 | off_t offset = chain->misalign; |
2482 | 0 | #endif |
2483 | |
|
2484 | 0 | ASSERT_EVBUFFER_LOCKED(buffer); |
2485 | | |
2486 | | #if defined(SENDFILE_IS_MACOSX) |
2487 | | res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); |
2488 | | if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) |
2489 | | return (-1); |
2490 | | |
2491 | | return (len); |
2492 | | #elif defined(SENDFILE_IS_FREEBSD) |
2493 | | res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); |
2494 | | if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) |
2495 | | return (-1); |
2496 | | |
2497 | | return (len); |
2498 | | #elif defined(SENDFILE_IS_LINUX) |
2499 | | /* TODO(niels): implement splice */ |
2500 | 0 | res = sendfile(dest_fd, source_fd, &offset, chain->off); |
2501 | 0 | if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { Branch (2501:6): [True: 0, False: 0]
|
2502 | | /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ |
2503 | 0 | return (0); |
2504 | 0 | } |
2505 | 0 | return (res); |
2506 | | #elif defined(SENDFILE_IS_SOLARIS) |
2507 | | { |
2508 | | const off_t offset_orig = offset; |
2509 | | res = sendfile(dest_fd, source_fd, &offset, chain->off); |
2510 | | if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { |
2511 | | if (offset - offset_orig) |
2512 | | return offset - offset_orig; |
2513 | | /* if this is EAGAIN or EINTR and no bytes were |
2514 | | * written, return 0 */ |
2515 | | return (0); |
2516 | | } |
2517 | | return (res); |
2518 | | } |
2519 | | #endif |
2520 | 0 | } |
2521 | | #endif |
2522 | | |
2523 | | int |
2524 | | evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, |
2525 | | ev_ssize_t howmuch) |
2526 | 2.35M | { |
2527 | 2.35M | int n = -1; |
2528 | | |
2529 | 2.35M | EVBUFFER_LOCK(buffer); |
2530 | | |
2531 | 2.35M | if (buffer->freeze_start) { Branch (2531:6): [True: 0, False: 2.35M]
|
2532 | 0 | goto done; |
2533 | 0 | } |
2534 | | |
2535 | 2.35M | if (howmuch < 0 || (size_t)howmuch > buffer->total_len) Branch (2535:6): [True: 0, False: 2.35M]
Branch (2535:21): [True: 2.35M, False: 0]
|
2536 | 2.35M | howmuch = buffer->total_len; |
2537 | | |
2538 | 2.35M | if (howmuch > 0) { Branch (2538:6): [True: 2.35M, False: 0]
|
2539 | 2.35M | #ifdef USE_SENDFILE |
2540 | 2.35M | struct evbuffer_chain *chain = buffer->first; |
2541 | 2.35M | if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) Branch (2541:7): [True: 2.35M, False: 0]
Branch (2541:24): [True: 0, False: 2.35M]
|
2542 | 0 | n = evbuffer_write_sendfile(buffer, fd, howmuch); |
2543 | 2.35M | else { |
2544 | 2.35M | #endif |
2545 | 2.35M | #ifdef USE_IOVEC_IMPL |
2546 | 2.35M | n = evbuffer_write_iovec(buffer, fd, howmuch); |
2547 | | #elif defined(_WIN32) |
2548 | | /* XXX(nickm) Don't disable this code until we know if |
2549 | | * the WSARecv code above works. */ |
2550 | | void *p = evbuffer_pullup(buffer, howmuch); |
2551 | | EVUTIL_ASSERT(p || !howmuch); |
2552 | | n = send(fd, p, howmuch, 0); |
2553 | | #else |
2554 | | void *p = evbuffer_pullup(buffer, howmuch); |
2555 | | EVUTIL_ASSERT(p || !howmuch); |
2556 | | n = write(fd, p, howmuch); |
2557 | | #endif |
2558 | 2.35M | #ifdef USE_SENDFILE |
2559 | 2.35M | } |
2560 | 2.35M | #endif |
2561 | 2.35M | } |
2562 | | |
2563 | 2.35M | if (n > 0) Branch (2563:6): [True: 2.35M, False: 0]
|
2564 | 2.35M | evbuffer_drain(buffer, n); |
2565 | | |
2566 | 2.35M | done: |
2567 | 2.35M | EVBUFFER_UNLOCK(buffer); |
2568 | 2.35M | return (n); |
2569 | 2.35M | } |
2570 | | |
2571 | | int |
2572 | | evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd) |
2573 | 0 | { |
2574 | 0 | return evbuffer_write_atmost(buffer, fd, -1); |
2575 | 0 | } |
2576 | | |
2577 | | unsigned char * |
2578 | | evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len) |
2579 | 0 | { |
2580 | 0 | unsigned char *search; |
2581 | 0 | struct evbuffer_ptr ptr; |
2582 | |
|
2583 | 0 | EVBUFFER_LOCK(buffer); |
2584 | |
|
2585 | 0 | ptr = evbuffer_search(buffer, (const char *)what, len, NULL); |
2586 | 0 | if (ptr.pos < 0) { Branch (2586:6): [True: 0, False: 0]
|
2587 | 0 | search = NULL; |
2588 | 0 | } else { |
2589 | 0 | search = evbuffer_pullup(buffer, ptr.pos + len); |
2590 | 0 | if (search) Branch (2590:7): [True: 0, False: 0]
|
2591 | 0 | search += ptr.pos; |
2592 | 0 | } |
2593 | 0 | EVBUFFER_UNLOCK(buffer); |
2594 | 0 | return search; |
2595 | 0 | } |
2596 | | |
2597 | | /* Subract <b>howfar</b> from the position of <b>pos</b> within |
2598 | | * <b>buf</b>. Returns 0 on success, -1 on failure. |
2599 | | * |
2600 | | * This isn't exposed yet, because of potential inefficiency issues. |
2601 | | * Maybe it should be. */ |
2602 | | static int |
2603 | | evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, |
2604 | | size_t howfar) |
2605 | 14.1M | { |
2606 | 14.1M | if (pos->pos < 0) Branch (2606:6): [True: 0, False: 14.1M]
|
2607 | 0 | return -1; |
2608 | 14.1M | if (howfar > (size_t)pos->pos) Branch (2608:6): [True: 0, False: 14.1M]
|
2609 | 0 | return -1; |
2610 | 14.1M | if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) { Branch (2610:6): [True: 14.1M, False: 0]
Branch (2610:30): [True: 14.1M, False: 0]
|
2611 | 14.1M | pos->internal_.pos_in_chain -= howfar; |
2612 | 14.1M | pos->pos -= howfar; |
2613 | 14.1M | return 0; |
2614 | 14.1M | } else { |
2615 | 0 | const size_t newpos = pos->pos - howfar; |
2616 | | /* Here's the inefficient part: it walks over the |
2617 | | * chains until we hit newpos. */ |
2618 | 0 | return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET); |
2619 | 0 | } |
2620 | 14.1M | } |
2621 | | |
2622 | | int |
2623 | | evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos, |
2624 | | size_t position, enum evbuffer_ptr_how how) |
2625 | 0 | { |
2626 | 0 | size_t left = position; |
2627 | 0 | struct evbuffer_chain *chain = NULL; |
2628 | 0 | int result = 0; |
2629 | |
|
2630 | 0 | EVBUFFER_LOCK(buf); |
2631 | |
|
2632 | 0 | switch (how) { Branch (2632:10): [True: 0, False: 0]
|
2633 | 0 | case EVBUFFER_PTR_SET: Branch (2633:2): [True: 0, False: 0]
|
2634 | 0 | chain = buf->first; |
2635 | 0 | pos->pos = position; |
2636 | 0 | position = 0; |
2637 | 0 | break; |
2638 | 0 | case EVBUFFER_PTR_ADD: Branch (2638:2): [True: 0, False: 0]
|
2639 | | /* this avoids iterating over all previous chains if |
2640 | | we just want to advance the position */ |
2641 | 0 | if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) { Branch (2641:7): [True: 0, False: 0]
Branch (2641:23): [True: 0, False: 0]
|
2642 | 0 | EVBUFFER_UNLOCK(buf); |
2643 | 0 | return -1; |
2644 | 0 | } |
2645 | 0 | chain = pos->internal_.chain; |
2646 | 0 | pos->pos += position; |
2647 | 0 | position = pos->internal_.pos_in_chain; |
2648 | 0 | break; |
2649 | 0 | } |
2650 | | |
2651 | 0 | EVUTIL_ASSERT(EV_SIZE_MAX - left >= position); |
2652 | 0 | while (chain && position + left >= chain->off) { Branch (2652:9): [True: 0, False: 0]
Branch (2652:18): [True: 0, False: 0]
|
2653 | 0 | left -= chain->off - position; |
2654 | 0 | chain = chain->next; |
2655 | 0 | position = 0; |
2656 | 0 | } |
2657 | 0 | if (chain) { Branch (2657:6): [True: 0, False: 0]
|
2658 | 0 | pos->internal_.chain = chain; |
2659 | 0 | pos->internal_.pos_in_chain = position + left; |
2660 | 0 | } else if (left == 0) { Branch (2660:13): [True: 0, False: 0]
|
2661 | | /* The first byte in the (nonexistent) chain after the last chain */ |
2662 | 0 | pos->internal_.chain = NULL; |
2663 | 0 | pos->internal_.pos_in_chain = 0; |
2664 | 0 | } else { |
2665 | 0 | PTR_NOT_FOUND(pos); |
2666 | 0 | result = -1; |
2667 | 0 | } |
2668 | |
|
2669 | 0 | EVBUFFER_UNLOCK(buf); |
2670 | |
|
2671 | 0 | return result; |
2672 | 0 | } |
2673 | | |
2674 | | /** |
2675 | | Compare the bytes in buf at position pos to the len bytes in mem. Return |
2676 | | less than 0, 0, or greater than 0 as memcmp. |
2677 | | */ |
2678 | | static int |
2679 | | evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, |
2680 | | const char *mem, size_t len) |
2681 | 0 | { |
2682 | 0 | struct evbuffer_chain *chain; |
2683 | 0 | size_t position; |
2684 | 0 | int r; |
2685 | |
|
2686 | 0 | ASSERT_EVBUFFER_LOCKED(buf); |
2687 | | |
2688 | 0 | if (pos->pos < 0 || Branch (2688:6): [True: 0, False: 0]
|
2689 | 0 | EV_SIZE_MAX - len < (size_t)pos->pos || Branch (2689:6): [True: 0, False: 0]
|
2690 | 0 | pos->pos + len > buf->total_len) Branch (2690:6): [True: 0, False: 0]
|
2691 | 0 | return -1; |
2692 | | |
2693 | 0 | chain = pos->internal_.chain; |
2694 | 0 | position = pos->internal_.pos_in_chain; |
2695 | 0 | while (len && chain) { Branch (2695:9): [True: 0, False: 0]
Branch (2695:16): [True: 0, False: 0]
|
2696 | 0 | size_t n_comparable; |
2697 | 0 | if (len + position > chain->off) Branch (2697:7): [True: 0, False: 0]
|
2698 | 0 | n_comparable = chain->off - position; |
2699 | 0 | else |
2700 | 0 | n_comparable = len; |
2701 | 0 | r = memcmp(chain->buffer + chain->misalign + position, mem, |
2702 | 0 | n_comparable); |
2703 | 0 | if (r) Branch (2703:7): [True: 0, False: 0]
|
2704 | 0 | return r; |
2705 | 0 | mem += n_comparable; |
2706 | 0 | len -= n_comparable; |
2707 | 0 | position = 0; |
2708 | 0 | chain = chain->next; |
2709 | 0 | } |
2710 | | |
2711 | 0 | return 0; |
2712 | 0 | } |
2713 | | |
2714 | | struct evbuffer_ptr |
2715 | | evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) |
2716 | 0 | { |
2717 | 0 | return evbuffer_search_range(buffer, what, len, start, NULL); |
2718 | 0 | } |
2719 | | |
2720 | | struct evbuffer_ptr |
2721 | | evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) |
2722 | 0 | { |
2723 | 0 | struct evbuffer_ptr pos; |
2724 | 0 | struct evbuffer_chain *chain, *last_chain = NULL; |
2725 | 0 | const unsigned char *p; |
2726 | 0 | char first; |
2727 | |
|
2728 | 0 | EVBUFFER_LOCK(buffer); |
2729 | |
|
2730 | 0 | if (start) { Branch (2730:6): [True: 0, False: 0]
|
2731 | 0 | memcpy(&pos, start, sizeof(pos)); |
2732 | 0 | chain = pos.internal_.chain; |
2733 | 0 | } else { |
2734 | 0 | pos.pos = 0; |
2735 | 0 | chain = pos.internal_.chain = buffer->first; |
2736 | 0 | pos.internal_.pos_in_chain = 0; |
2737 | 0 | } |
2738 | |
|
2739 | 0 | if (end) Branch (2739:6): [True: 0, False: 0]
|
2740 | 0 | last_chain = end->internal_.chain; |
2741 | |
|
2742 | 0 | if (!len || len > EV_SSIZE_MAX) Branch (2742:6): [True: 0, False: 0]
Branch (2742:14): [True: 0, False: 0]
|
2743 | 0 | goto done; |
2744 | | |
2745 | 0 | first = what[0]; |
2746 | |
|
2747 | 0 | while (chain) { Branch (2747:9): [True: 0, False: 0]
|
2748 | 0 | const unsigned char *start_at = |
2749 | 0 | chain->buffer + chain->misalign + |
2750 | 0 | pos.internal_.pos_in_chain; |
2751 | 0 | p = memchr(start_at, first, |
2752 | 0 | chain->off - pos.internal_.pos_in_chain); |
2753 | 0 | if (p) { Branch (2753:7): [True: 0, False: 0]
|
2754 | 0 | pos.pos += p - start_at; |
2755 | 0 | pos.internal_.pos_in_chain += p - start_at; |
2756 | 0 | if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { Branch (2756:8): [True: 0, False: 0]
|
2757 | 0 | if (end && pos.pos + (ev_ssize_t)len > end->pos) Branch (2757:9): [True: 0, False: 0]
Branch (2757:16): [True: 0, False: 0]
|
2758 | 0 | goto not_found; |
2759 | 0 | else |
2760 | 0 | goto done; |
2761 | 0 | } |
2762 | 0 | ++pos.pos; |
2763 | 0 | ++pos.internal_.pos_in_chain; |
2764 | 0 | if (pos.internal_.pos_in_chain == chain->off) { Branch (2764:8): [True: 0, False: 0]
|
2765 | 0 | chain = pos.internal_.chain = chain->next; |
2766 | 0 | pos.internal_.pos_in_chain = 0; |
2767 | 0 | } |
2768 | 0 | } else { |
2769 | 0 | if (chain == last_chain) Branch (2769:8): [True: 0, False: 0]
|
2770 | 0 | goto not_found; |
2771 | 0 | pos.pos += chain->off - pos.internal_.pos_in_chain; |
2772 | 0 | chain = pos.internal_.chain = chain->next; |
2773 | 0 | pos.internal_.pos_in_chain = 0; |
2774 | 0 | } |
2775 | 0 | } |
2776 | | |
2777 | 0 | not_found: |
2778 | 0 | PTR_NOT_FOUND(&pos); |
2779 | 0 | done: |
2780 | 0 | EVBUFFER_UNLOCK(buffer); |
2781 | 0 | return pos; |
2782 | 0 | } |
2783 | | |
2784 | | int |
2785 | | evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, |
2786 | | struct evbuffer_ptr *start_at, |
2787 | | struct evbuffer_iovec *vec, int n_vec) |
2788 | 0 | { |
2789 | 0 | struct evbuffer_chain *chain; |
2790 | 0 | int idx = 0; |
2791 | 0 | ev_ssize_t len_so_far = 0; |
2792 | | |
2793 | | /* Avoid locking in trivial edge cases */ |
2794 | 0 | if (start_at && start_at->internal_.chain == NULL) Branch (2794:6): [True: 0, False: 0]
Branch (2794:18): [True: 0, False: 0]
|
2795 | 0 | return 0; |
2796 | | |
2797 | 0 | EVBUFFER_LOCK(buffer); |
2798 | |
|
2799 | 0 | if (start_at) { Branch (2799:6): [True: 0, False: 0]
|
2800 | 0 | chain = start_at->internal_.chain; |
2801 | 0 | len_so_far = chain->off |
2802 | 0 | - start_at->internal_.pos_in_chain; |
2803 | 0 | idx = 1; |
2804 | 0 | if (n_vec > 0) { Branch (2804:7): [True: 0, False: 0]
|
2805 | 0 | vec[0].iov_base = (void *)(chain->buffer + chain->misalign |
2806 | 0 | + start_at->internal_.pos_in_chain); |
2807 | 0 | vec[0].iov_len = len_so_far; |
2808 | 0 | } |
2809 | 0 | chain = chain->next; |
2810 | 0 | } else { |
2811 | 0 | chain = buffer->first; |
2812 | 0 | } |
2813 | |
|
2814 | 0 | if (n_vec == 0 && len < 0) { Branch (2814:6): [True: 0, False: 0]
Branch (2814:20): [True: 0, False: 0]
|
2815 | | /* If no vectors are provided and they asked for "everything", |
2816 | | * pretend they asked for the actual available amount. */ |
2817 | 0 | len = buffer->total_len; |
2818 | 0 | if (start_at) { Branch (2818:7): [True: 0, False: 0]
|
2819 | 0 | len -= start_at->pos; |
2820 | 0 | } |
2821 | 0 | } |
2822 | |
|
2823 | 0 | while (chain) { Branch (2823:9): [True: 0, False: 0]
|
2824 | 0 | if (len >= 0 && len_so_far >= len) Branch (2824:7): [True: 0, False: 0]
Branch (2824:19): [True: 0, False: 0]
|
2825 | 0 | break; |
2826 | 0 | if (idx<n_vec) { Branch (2826:7): [True: 0, False: 0]
|
2827 | 0 | vec[idx].iov_base = (void *)(chain->buffer + chain->misalign); |
2828 | 0 | vec[idx].iov_len = chain->off; |
2829 | 0 | } else if (len<0) { Branch (2829:14): [True: 0, False: 0]
|
2830 | 0 | break; |
2831 | 0 | } |
2832 | 0 | ++idx; |
2833 | 0 | len_so_far += chain->off; |
2834 | 0 | chain = chain->next; |
2835 | 0 | } |
2836 | |
|
2837 | 0 | EVBUFFER_UNLOCK(buffer); |
2838 | |
|
2839 | 0 | return idx; |
2840 | 0 | } |
2841 | | |
2842 | | |
2843 | | int |
2844 | | evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) |
2845 | 9.44M | { |
2846 | 9.44M | char *buffer; |
2847 | 9.44M | size_t space; |
2848 | 9.44M | int sz, result = -1; |
2849 | 9.44M | va_list aq; |
2850 | 9.44M | struct evbuffer_chain *chain; |
2851 | | |
2852 | | |
2853 | 9.44M | EVBUFFER_LOCK(buf); |
2854 | | |
2855 | 9.44M | if (buf->freeze_end) { Branch (2855:6): [True: 0, False: 9.44M]
|
2856 | 0 | goto done; |
2857 | 0 | } |
2858 | | |
2859 | | /* make sure that at least some space is available */ |
2860 | 9.44M | if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) Branch (2860:6): [True: 0, False: 9.44M]
|
2861 | 0 | goto done; |
2862 | | |
2863 | 9.44M | for (;;) { |
2864 | | #if 0 |
2865 | | size_t used = chain->misalign + chain->off; |
2866 | | buffer = (char *)chain->buffer + chain->misalign + chain->off; |
2867 | | EVUTIL_ASSERT(chain->buffer_len >= used); |
2868 | | space = chain->buffer_len - used; |
2869 | | #endif |
2870 | 9.44M | buffer = (char*) CHAIN_SPACE_PTR(chain); |
2871 | 9.44M | space = (size_t) CHAIN_SPACE_LEN(chain); |
2872 | | |
2873 | | #ifndef va_copy |
2874 | | #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) |
2875 | | #endif |
2876 | 9.44M | va_copy(aq, ap); |
2877 | | |
2878 | 9.44M | sz = evutil_vsnprintf(buffer, space, fmt, aq); |
2879 | | |
2880 | 9.44M | va_end(aq); |
2881 | | |
2882 | 9.44M | if (sz < 0) Branch (2882:7): [True: 0, False: 9.44M]
|
2883 | 0 | goto done; |
2884 | 9.44M | if (INT_MAX >= EVBUFFER_CHAIN_MAX && Branch (2884:7): [Folded - Ignored]
|
2885 | 9.44M | (size_t)sz >= EVBUFFER_CHAIN_MAX) Branch (2885:7): [True: 0, False: 0]
|
2886 | 0 | goto done; |
2887 | 9.44M | if ((size_t)sz < space) { Branch (2887:7): [True: 9.44M, False: 0]
|
2888 | 9.44M | chain->off += sz; |
2889 | 9.44M | buf->total_len += sz; |
2890 | 9.44M | buf->n_add_for_cb += sz; |
2891 | | |
2892 | 9.44M | advance_last_with_data(buf); |
2893 | 9.44M | evbuffer_invoke_callbacks_(buf); |
2894 | 9.44M | result = sz; |
2895 | 9.44M | goto done; |
2896 | 9.44M | } |
2897 | 0 | if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) Branch (2897:7): [True: 0, False: 0]
|
2898 | 0 | goto done; |
2899 | 0 | } |
2900 | | /* NOTREACHED */ |
2901 | | |
2902 | 9.44M | done: |
2903 | 9.44M | EVBUFFER_UNLOCK(buf); |
2904 | 9.44M | return result; |
2905 | 9.44M | } |
2906 | | |
2907 | | int |
2908 | | evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) |
2909 | 9.44M | { |
2910 | 9.44M | int res = -1; |
2911 | 9.44M | va_list ap; |
2912 | | |
2913 | 9.44M | va_start(ap, fmt); |
2914 | 9.44M | res = evbuffer_add_vprintf(buf, fmt, ap); |
2915 | 9.44M | va_end(ap); |
2916 | | |
2917 | 9.44M | return (res); |
2918 | 9.44M | } |
2919 | | |
2920 | | int |
2921 | | evbuffer_add_reference(struct evbuffer *outbuf, |
2922 | | const void *data, size_t datlen, |
2923 | | evbuffer_ref_cleanup_cb cleanupfn, void *extra) |
2924 | 0 | { |
2925 | 0 | struct evbuffer_chain *chain; |
2926 | 0 | struct evbuffer_chain_reference *info; |
2927 | 0 | int result = -1; |
2928 | |
|
2929 | 0 | chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); |
2930 | 0 | if (!chain) Branch (2930:6): [True: 0, False: 0]
|
2931 | 0 | return (-1); |
2932 | 0 | chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; |
2933 | 0 | chain->buffer = (unsigned char *)data; |
2934 | 0 | chain->buffer_len = datlen; |
2935 | 0 | chain->off = datlen; |
2936 | |
|
2937 | 0 | info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); |
2938 | 0 | info->cleanupfn = cleanupfn; |
2939 | 0 | info->extra = extra; |
2940 | |
|
2941 | 0 | EVBUFFER_LOCK(outbuf); |
2942 | 0 | if (outbuf->freeze_end) { Branch (2942:6): [True: 0, False: 0]
|
2943 | | /* don't call chain_free; we do not want to actually invoke |
2944 | | * the cleanup function */ |
2945 | 0 | mm_free(chain); |
2946 | 0 | goto done; |
2947 | 0 | } |
2948 | 0 | evbuffer_chain_insert(outbuf, chain); |
2949 | 0 | outbuf->n_add_for_cb += datlen; |
2950 | |
|
2951 | 0 | evbuffer_invoke_callbacks_(outbuf); |
2952 | |
|
2953 | 0 | result = 0; |
2954 | 0 | done: |
2955 | 0 | EVBUFFER_UNLOCK(outbuf); |
2956 | |
|
2957 | 0 | return result; |
2958 | 0 | } |
2959 | | |
2960 | | /* TODO(niels): we may want to add to automagically convert to mmap, in |
2961 | | * case evbuffer_remove() or evbuffer_pullup() are being used. |
2962 | | */ |
2963 | | struct evbuffer_file_segment * |
2964 | | evbuffer_file_segment_new( |
2965 | | int fd, ev_off_t offset, ev_off_t length, unsigned flags) |
2966 | 0 | { |
2967 | 0 | struct evbuffer_file_segment *seg = |
2968 | 0 | mm_calloc(sizeof(struct evbuffer_file_segment), 1); |
2969 | 0 | if (!seg) Branch (2969:6): [True: 0, False: 0]
|
2970 | 0 | return NULL; |
2971 | 0 | seg->refcnt = 1; |
2972 | 0 | seg->fd = fd; |
2973 | 0 | seg->flags = flags; |
2974 | 0 | seg->file_offset = offset; |
2975 | 0 | seg->cleanup_cb = NULL; |
2976 | 0 | seg->cleanup_cb_arg = NULL; |
2977 | | #ifdef _WIN32 |
2978 | | #ifndef lseek |
2979 | | #define lseek _lseeki64 |
2980 | | #endif |
2981 | | #ifndef fstat |
2982 | | #define fstat _fstat |
2983 | | #endif |
2984 | | #ifndef stat |
2985 | | #define stat _stat |
2986 | | #endif |
2987 | | #endif |
2988 | 0 | if (length == -1) { Branch (2988:6): [True: 0, False: 0]
|
2989 | 0 | struct stat st; |
2990 | 0 | if (fstat(fd, &st) < 0) Branch (2990:7): [True: 0, False: 0]
|
2991 | 0 | goto err; |
2992 | 0 | length = st.st_size; |
2993 | 0 | } |
2994 | 0 | seg->length = length; |
2995 | |
|
2996 | 0 | if (offset < 0 || length < 0 || Branch (2996:6): [True: 0, False: 0]
Branch (2996:20): [True: 0, False: 0]
|
2997 | 0 | ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) || Branch (2997:6): [True: 0, False: 0]
|
2998 | 0 | (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length)) Branch (2998:6): [True: 0, False: 0]
|
2999 | 0 | goto err; |
3000 | | |
3001 | 0 | #if defined(USE_SENDFILE) |
3002 | 0 | if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { Branch (3002:6): [True: 0, False: 0]
|
3003 | 0 | seg->can_sendfile = 1; |
3004 | 0 | goto done; |
3005 | 0 | } |
3006 | 0 | #endif |
3007 | | |
3008 | 0 | if (evbuffer_file_segment_materialize(seg)<0) Branch (3008:6): [True: 0, False: 0]
|
3009 | 0 | goto err; |
3010 | | |
3011 | 0 | #if defined(USE_SENDFILE) |
3012 | 0 | done: |
3013 | 0 | #endif |
3014 | 0 | if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { Branch (3014:6): [True: 0, False: 0]
|
3015 | 0 | EVTHREAD_ALLOC_LOCK(seg->lock, 0); |
3016 | 0 | } |
3017 | 0 | return seg; |
3018 | 0 | err: |
3019 | 0 | mm_free(seg); |
3020 | 0 | return NULL; |
3021 | 0 | } |
3022 | | |
3023 | | #ifdef EVENT__HAVE_MMAP |
3024 | | static long |
3025 | | get_page_size(void) |
3026 | 0 | { |
3027 | | #ifdef SC_PAGE_SIZE |
3028 | | return sysconf(SC_PAGE_SIZE); |
3029 | | #elif defined(_SC_PAGE_SIZE) |
3030 | 0 | return sysconf(_SC_PAGE_SIZE); |
3031 | | #else |
3032 | | return 1; |
3033 | | #endif |
3034 | 0 | } |
3035 | | #endif |
3036 | | |
3037 | | /* DOCDOC */ |
3038 | | /* Requires lock */ |
3039 | | static int |
3040 | | evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) |
3041 | 0 | { |
3042 | 0 | const unsigned flags = seg->flags; |
3043 | 0 | const int fd = seg->fd; |
3044 | 0 | const ev_off_t length = seg->length; |
3045 | 0 | const ev_off_t offset = seg->file_offset; |
3046 | |
|
3047 | 0 | if (seg->contents) Branch (3047:6): [True: 0, False: 0]
|
3048 | 0 | return 0; /* already materialized */ |
3049 | | |
3050 | 0 | #if defined(EVENT__HAVE_MMAP) |
3051 | 0 | if (!(flags & EVBUF_FS_DISABLE_MMAP)) { Branch (3051:6): [True: 0, False: 0]
|
3052 | 0 | off_t offset_rounded = 0, offset_leftover = 0; |
3053 | 0 | void *mapped; |
3054 | 0 | if (offset) { Branch (3054:7): [True: 0, False: 0]
|
3055 | | /* mmap implementations don't generally like us |
3056 | | * to have an offset that isn't a round */ |
3057 | 0 | long page_size = get_page_size(); |
3058 | 0 | if (page_size == -1) Branch (3058:8): [True: 0, False: 0]
|
3059 | 0 | goto err; |
3060 | 0 | offset_leftover = offset % page_size; |
3061 | 0 | offset_rounded = offset - offset_leftover; |
3062 | 0 | } |
3063 | 0 | mapped = mmap(NULL, length + offset_leftover, |
3064 | 0 | PROT_READ, |
3065 | | #ifdef MAP_NOCACHE |
3066 | | MAP_NOCACHE | /* ??? */ |
3067 | | #endif |
3068 | 0 | #ifdef MAP_FILE |
3069 | 0 | MAP_FILE | |
3070 | 0 | #endif |
3071 | 0 | MAP_PRIVATE, |
3072 | 0 | fd, offset_rounded); |
3073 | 0 | if (mapped == MAP_FAILED) { Branch (3073:7): [True: 0, False: 0]
|
3074 | 0 | event_warn("%s: mmap(%d, %d, %zu) failed", |
3075 | 0 | __func__, fd, 0, (size_t)(offset + length)); |
3076 | 0 | } else { |
3077 | 0 | seg->mapping = mapped; |
3078 | 0 | seg->contents = (char*)mapped+offset_leftover; |
3079 | 0 | seg->mmap_offset = 0; |
3080 | 0 | seg->is_mapping = 1; |
3081 | 0 | goto done; |
3082 | 0 | } |
3083 | 0 | } |
3084 | 0 | #endif |
3085 | | #ifdef _WIN32 |
3086 | | if (!(flags & EVBUF_FS_DISABLE_MMAP)) { |
3087 | | intptr_t h = _get_osfhandle(fd); |
3088 | | HANDLE m; |
3089 | | ev_uint64_t total_size = length+offset; |
3090 | | if ((HANDLE)h == INVALID_HANDLE_VALUE) |
3091 | | goto err; |
3092 | | m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, |
3093 | | (total_size >> 32), total_size & 0xfffffffful, |
3094 | | NULL); |
3095 | | if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */ |
3096 | | seg->mapping_handle = m; |
3097 | | seg->mmap_offset = offset; |
3098 | | seg->is_mapping = 1; |
3099 | | goto done; |
3100 | | } |
3101 | | } |
3102 | | #endif |
3103 | 0 | { |
3104 | 0 | ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; |
3105 | 0 | ev_off_t read_so_far = 0; |
3106 | 0 | char *mem; |
3107 | 0 | int e; |
3108 | 0 | ev_ssize_t n = 0; |
3109 | 0 | if (!(mem = mm_malloc(length))) Branch (3109:7): [True: 0, False: 0]
|
3110 | 0 | goto err; |
3111 | 0 | if (start_pos < 0) { Branch (3111:7): [True: 0, False: 0]
|
3112 | 0 | mm_free(mem); |
3113 | 0 | goto err; |
3114 | 0 | } |
3115 | 0 | if (lseek(fd, offset, SEEK_SET) < 0) { Branch (3115:7): [True: 0, False: 0]
|
3116 | 0 | mm_free(mem); |
3117 | 0 | goto err; |
3118 | 0 | } |
3119 | 0 | while (read_so_far < length) { Branch (3119:10): [True: 0, False: 0]
|
3120 | 0 | n = read(fd, mem+read_so_far, length-read_so_far); |
3121 | 0 | if (n <= 0) Branch (3121:8): [True: 0, False: 0]
|
3122 | 0 | break; |
3123 | 0 | read_so_far += n; |
3124 | 0 | } |
3125 | |
|
3126 | 0 | e = errno; |
3127 | 0 | pos = lseek(fd, start_pos, SEEK_SET); |
3128 | 0 | if (n < 0 || (n == 0 && length > read_so_far)) { Branch (3128:7): [True: 0, False: 0]
Branch (3128:17): [True: 0, False: 0]
Branch (3128:27): [True: 0, False: 0]
|
3129 | 0 | mm_free(mem); |
3130 | 0 | errno = e; |
3131 | 0 | goto err; |
3132 | 0 | } else if (pos < 0) { Branch (3132:14): [True: 0, False: 0]
|
3133 | 0 | mm_free(mem); |
3134 | 0 | goto err; |
3135 | 0 | } |
3136 | | |
3137 | 0 | seg->contents = mem; |
3138 | 0 | } |
3139 | | |
3140 | 0 | done: |
3141 | 0 | return 0; |
3142 | 0 | err: |
3143 | 0 | return -1; |
3144 | 0 | } |
3145 | | |
3146 | | void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg, |
3147 | | evbuffer_file_segment_cleanup_cb cb, void* arg) |
3148 | 0 | { |
3149 | 0 | EVUTIL_ASSERT(seg->refcnt > 0); |
3150 | 0 | seg->cleanup_cb = cb; |
3151 | 0 | seg->cleanup_cb_arg = arg; |
3152 | 0 | } |
3153 | | |
3154 | | void |
3155 | | evbuffer_file_segment_free(struct evbuffer_file_segment *seg) |
3156 | 0 | { |
3157 | 0 | int refcnt; |
3158 | 0 | EVLOCK_LOCK(seg->lock, 0); |
3159 | 0 | refcnt = --seg->refcnt; |
3160 | 0 | EVLOCK_UNLOCK(seg->lock, 0); |
3161 | 0 | if (refcnt > 0) Branch (3161:6): [True: 0, False: 0]
|
3162 | 0 | return; |
3163 | 0 | EVUTIL_ASSERT(refcnt == 0); |
3164 | | |
3165 | 0 | if (seg->is_mapping) { Branch (3165:6): [True: 0, False: 0]
|
3166 | | #ifdef _WIN32 |
3167 | | CloseHandle(seg->mapping_handle); |
3168 | | #elif defined (EVENT__HAVE_MMAP) |
3169 | | off_t offset_leftover; |
3170 | 0 | offset_leftover = seg->file_offset % get_page_size(); |
3171 | 0 | if (munmap(seg->mapping, seg->length + offset_leftover) == -1) Branch (3171:7): [True: 0, False: 0]
|
3172 | 0 | event_warn("%s: munmap failed", __func__); |
3173 | 0 | #endif |
3174 | 0 | } else if (seg->contents) { Branch (3174:13): [True: 0, False: 0]
|
3175 | 0 | mm_free(seg->contents); |
3176 | 0 | } |
3177 | |
|
3178 | 0 | if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { Branch (3178:6): [True: 0, False: 0]
Branch (3178:47): [True: 0, False: 0]
|
3179 | 0 | close(seg->fd); |
3180 | 0 | } |
3181 | | |
3182 | 0 | if (seg->cleanup_cb) { Branch (3182:6): [True: 0, False: 0]
|
3183 | 0 | (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg, |
3184 | 0 | seg->flags, seg->cleanup_cb_arg); |
3185 | 0 | seg->cleanup_cb = NULL; |
3186 | 0 | seg->cleanup_cb_arg = NULL; |
3187 | 0 | } |
3188 | |
|
3189 | 0 | EVTHREAD_FREE_LOCK(seg->lock, 0); |
3190 | 0 | mm_free(seg); |
3191 | 0 | } |
3192 | | |
3193 | | int |
3194 | | evbuffer_add_file_segment(struct evbuffer *buf, |
3195 | | struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) |
3196 | 0 | { |
3197 | 0 | struct evbuffer_chain *chain; |
3198 | 0 | struct evbuffer_chain_file_segment *extra; |
3199 | 0 | int can_use_sendfile = 0; |
3200 | |
|
3201 | 0 | EVBUFFER_LOCK(buf); |
3202 | 0 | EVLOCK_LOCK(seg->lock, 0); |
3203 | 0 | if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { Branch (3203:6): [True: 0, False: 0]
|
3204 | 0 | can_use_sendfile = 1; |
3205 | 0 | } else { |
3206 | 0 | if (!seg->contents) { Branch (3206:7): [True: 0, False: 0]
|
3207 | 0 | if (evbuffer_file_segment_materialize(seg)<0) { Branch (3207:8): [True: 0, False: 0]
|
3208 | 0 | EVLOCK_UNLOCK(seg->lock, 0); |
3209 | 0 | EVBUFFER_UNLOCK(buf); |
3210 | 0 | return -1; |
3211 | 0 | } |
3212 | 0 | } |
3213 | 0 | } |
3214 | 0 | EVLOCK_UNLOCK(seg->lock, 0); |
3215 | |
|
3216 | 0 | if (buf->freeze_end) Branch (3216:6): [True: 0, False: 0]
|
3217 | 0 | goto err; |
3218 | | |
3219 | 0 | if (length < 0) { Branch (3219:6): [True: 0, False: 0]
|
3220 | 0 | if (offset > seg->length) Branch (3220:7): [True: 0, False: 0]
|
3221 | 0 | goto err; |
3222 | 0 | length = seg->length - offset; |
3223 | 0 | } |
3224 | | |
3225 | | /* Can we actually add this? */ |
3226 | 0 | if (offset+length > seg->length) Branch (3226:6): [True: 0, False: 0]
|
3227 | 0 | goto err; |
3228 | | |
3229 | 0 | chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); |
3230 | 0 | if (!chain) Branch (3230:6): [True: 0, False: 0]
|
3231 | 0 | goto err; |
3232 | 0 | extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); |
3233 | |
|
3234 | 0 | chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; |
3235 | 0 | if (can_use_sendfile && seg->can_sendfile) { Branch (3235:6): [True: 0, False: 0]
Branch (3235:26): [True: 0, False: 0]
|
3236 | 0 | chain->flags |= EVBUFFER_SENDFILE; |
3237 | 0 | chain->misalign = seg->file_offset + offset; |
3238 | 0 | chain->off = length; |
3239 | 0 | chain->buffer_len = chain->misalign + length; |
3240 | 0 | } else if (seg->is_mapping) { Branch (3240:13): [True: 0, False: 0]
|
3241 | | #ifdef _WIN32 |
3242 | | ev_uint64_t total_offset = seg->mmap_offset+offset; |
3243 | | ev_uint64_t offset_rounded=0, offset_remaining=0; |
3244 | | LPVOID data; |
3245 | | if (total_offset) { |
3246 | | SYSTEM_INFO si; |
3247 | | memset(&si, 0, sizeof(si)); /* cargo cult */ |
3248 | | GetSystemInfo(&si); |
3249 | | offset_remaining = total_offset % si.dwAllocationGranularity; |
3250 | | offset_rounded = total_offset - offset_remaining; |
3251 | | } |
3252 | | data = MapViewOfFile( |
3253 | | seg->mapping_handle, |
3254 | | FILE_MAP_READ, |
3255 | | offset_rounded >> 32, |
3256 | | offset_rounded & 0xfffffffful, |
3257 | | length + offset_remaining); |
3258 | | if (data == NULL) { |
3259 | | mm_free(chain); |
3260 | | goto err; |
3261 | | } |
3262 | | chain->buffer = (unsigned char*) data; |
3263 | | chain->buffer_len = length+offset_remaining; |
3264 | | chain->misalign = offset_remaining; |
3265 | | chain->off = length; |
3266 | | #else |
3267 | 0 | chain->buffer = (unsigned char*)(seg->contents + offset); |
3268 | 0 | chain->buffer_len = length; |
3269 | 0 | chain->off = length; |
3270 | 0 | #endif |
3271 | 0 | } else { |
3272 | 0 | chain->buffer = (unsigned char*)(seg->contents + offset); |
3273 | 0 | chain->buffer_len = length; |
3274 | 0 | chain->off = length; |
3275 | 0 | } |
3276 | |
|
3277 | 0 | EVLOCK_LOCK(seg->lock, 0); |
3278 | 0 | ++seg->refcnt; |
3279 | 0 | EVLOCK_UNLOCK(seg->lock, 0); |
3280 | 0 | extra->segment = seg; |
3281 | 0 | buf->n_add_for_cb += length; |
3282 | 0 | evbuffer_chain_insert(buf, chain); |
3283 | |
|
3284 | 0 | evbuffer_invoke_callbacks_(buf); |
3285 | |
|
3286 | 0 | EVBUFFER_UNLOCK(buf); |
3287 | |
|
3288 | 0 | return 0; |
3289 | 0 | err: |
3290 | 0 | EVBUFFER_UNLOCK(buf); |
3291 | 0 | evbuffer_file_segment_free(seg); /* Lowers the refcount */ |
3292 | 0 | return -1; |
3293 | 0 | } |
3294 | | |
3295 | | int |
3296 | | evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) |
3297 | 0 | { |
3298 | 0 | struct evbuffer_file_segment *seg; |
3299 | 0 | unsigned flags = EVBUF_FS_CLOSE_ON_FREE; |
3300 | 0 | int r; |
3301 | |
|
3302 | 0 | seg = evbuffer_file_segment_new(fd, offset, length, flags); |
3303 | 0 | if (!seg) Branch (3303:6): [True: 0, False: 0]
|
3304 | 0 | return -1; |
3305 | 0 | r = evbuffer_add_file_segment(buf, seg, 0, length); |
3306 | 0 | if (r == 0) Branch (3306:6): [True: 0, False: 0]
|
3307 | 0 | evbuffer_file_segment_free(seg); |
3308 | 0 | return r; |
3309 | 0 | } |
3310 | | |
3311 | | int |
3312 | | evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) |
3313 | 0 | { |
3314 | 0 | EVBUFFER_LOCK(buffer); |
3315 | |
|
3316 | 0 | if (!LIST_EMPTY(&buffer->callbacks)) Branch (3316:6): [True: 0, False: 0]
|
3317 | 0 | evbuffer_remove_all_callbacks(buffer); |
3318 | |
|
3319 | 0 | if (cb) { Branch (3319:6): [True: 0, False: 0]
|
3320 | 0 | struct evbuffer_cb_entry *ent = |
3321 | 0 | evbuffer_add_cb(buffer, NULL, cbarg); |
3322 | 0 | if (!ent) { Branch (3322:7): [True: 0, False: 0]
|
3323 | 0 | EVBUFFER_UNLOCK(buffer); |
3324 | 0 | return -1; |
3325 | 0 | } |
3326 | 0 | ent->cb.cb_obsolete = cb; |
3327 | 0 | ent->flags |= EVBUFFER_CB_OBSOLETE; |
3328 | 0 | } |
3329 | 0 | EVBUFFER_UNLOCK(buffer); |
3330 | 0 | return 0; |
3331 | 0 | } |
3332 | | |
3333 | | struct evbuffer_cb_entry * |
3334 | | evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) |
3335 | 2.35M | { |
3336 | 2.35M | struct evbuffer_cb_entry *e; |
3337 | 2.35M | if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) Branch (3337:6): [True: 0, False: 2.35M]
|
3338 | 0 | return NULL; |
3339 | 2.35M | EVBUFFER_LOCK(buffer); |
3340 | 2.35M | e->cb.cb_func = cb; |
3341 | 2.35M | e->cbarg = cbarg; |
3342 | 2.35M | e->flags = EVBUFFER_CB_ENABLED; |
3343 | 2.35M | LIST_INSERT_HEAD(&buffer->callbacks, e, next); |
3344 | 2.35M | EVBUFFER_UNLOCK(buffer); |
3345 | 2.35M | return e; |
3346 | 2.35M | } |
3347 | | |
3348 | | int |
3349 | | evbuffer_remove_cb_entry(struct evbuffer *buffer, |
3350 | | struct evbuffer_cb_entry *ent) |
3351 | 0 | { |
3352 | 0 | EVBUFFER_LOCK(buffer); |
3353 | 0 | LIST_REMOVE(ent, next); |
3354 | 0 | EVBUFFER_UNLOCK(buffer); |
3355 | 0 | mm_free(ent); |
3356 | 0 | return 0; |
3357 | 0 | } |
3358 | | |
3359 | | int |
3360 | | evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) |
3361 | 0 | { |
3362 | 0 | struct evbuffer_cb_entry *cbent; |
3363 | 0 | int result = -1; |
3364 | 0 | EVBUFFER_LOCK(buffer); |
3365 | 0 | LIST_FOREACH(cbent, &buffer->callbacks, next) { |
3366 | 0 | if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { Branch (3366:7): [True: 0, False: 0]
Branch (3366:34): [True: 0, False: 0]
|
3367 | 0 | result = evbuffer_remove_cb_entry(buffer, cbent); |
3368 | 0 | goto done; |
3369 | 0 | } |
3370 | 0 | } |
3371 | 0 | done: |
3372 | 0 | EVBUFFER_UNLOCK(buffer); |
3373 | 0 | return result; |
3374 | 0 | } |
3375 | | |
3376 | | int |
3377 | | evbuffer_cb_set_flags(struct evbuffer *buffer, |
3378 | | struct evbuffer_cb_entry *cb, ev_uint32_t flags) |
3379 | 0 | { |
3380 | | /* the user isn't allowed to mess with these. */ |
3381 | 0 | flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; |
3382 | 0 | EVBUFFER_LOCK(buffer); |
3383 | 0 | cb->flags |= flags; |
3384 | 0 | EVBUFFER_UNLOCK(buffer); |
3385 | 0 | return 0; |
3386 | 0 | } |
3387 | | |
3388 | | int |
3389 | | evbuffer_cb_clear_flags(struct evbuffer *buffer, |
3390 | | struct evbuffer_cb_entry *cb, ev_uint32_t flags) |
3391 | 0 | { |
3392 | | /* the user isn't allowed to mess with these. */ |
3393 | 0 | flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; |
3394 | 0 | EVBUFFER_LOCK(buffer); |
3395 | 0 | cb->flags &= ~flags; |
3396 | 0 | EVBUFFER_UNLOCK(buffer); |
3397 | 0 | return 0; |
3398 | 0 | } |
3399 | | |
3400 | | int |
3401 | | evbuffer_freeze(struct evbuffer *buffer, int start) |
3402 | 11.7M | { |
3403 | 11.7M | EVBUFFER_LOCK(buffer); |
3404 | 11.7M | if (start) Branch (3404:6): [True: 4.71M, False: 7.06M]
|
3405 | 4.71M | buffer->freeze_start = 1; |
3406 | 7.06M | else |
3407 | 7.06M | buffer->freeze_end = 1; |
3408 | 11.7M | EVBUFFER_UNLOCK(buffer); |
3409 | 11.7M | return 0; |
3410 | 11.7M | } |
3411 | | |
3412 | | int |
3413 | | evbuffer_unfreeze(struct evbuffer *buffer, int start) |
3414 | 11.7M | { |
3415 | 11.7M | EVBUFFER_LOCK(buffer); |
3416 | 11.7M | if (start) Branch (3416:6): [True: 4.71M, False: 7.06M]
|
3417 | 4.71M | buffer->freeze_start = 0; |
3418 | 7.06M | else |
3419 | 7.06M | buffer->freeze_end = 0; |
3420 | 11.7M | EVBUFFER_UNLOCK(buffer); |
3421 | 11.7M | return 0; |
3422 | 11.7M | } |
3423 | | |
3424 | | #if 0 |
3425 | | void |
3426 | | evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) |
3427 | | { |
3428 | | if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { |
3429 | | cb->size_before_suspend = evbuffer_get_length(buffer); |
3430 | | cb->flags |= EVBUFFER_CB_SUSPENDED; |
3431 | | } |
3432 | | } |
3433 | | |
3434 | | void |
3435 | | evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) |
3436 | | { |
3437 | | if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { |
3438 | | unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); |
3439 | | size_t sz = cb->size_before_suspend; |
3440 | | cb->flags &= ~(EVBUFFER_CB_SUSPENDED| |
3441 | | EVBUFFER_CB_CALL_ON_UNSUSPEND); |
3442 | | cb->size_before_suspend = 0; |
3443 | | if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { |
3444 | | cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); |
3445 | | } |
3446 | | } |
3447 | | } |
3448 | | #endif |
3449 | | |
3450 | | int |
3451 | | evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, |
3452 | | int max_cbs) |
3453 | 4.71M | { |
3454 | 4.71M | int r = 0; |
3455 | 4.71M | EVBUFFER_LOCK(buffer); |
3456 | 4.71M | if (buffer->deferred_cbs) { Branch (3456:6): [True: 0, False: 4.71M]
|
3457 | 0 | if (max_cbs < 1) { Branch (3457:7): [True: 0, False: 0]
|
3458 | 0 | r = -1; |
3459 | 0 | goto done; |
3460 | 0 | } |
3461 | 0 | cbs[0] = &buffer->deferred; |
3462 | 0 | r = 1; |
3463 | 0 | } |
3464 | 4.71M | done: |
3465 | 4.71M | EVBUFFER_UNLOCK(buffer); |
3466 | 4.71M | return r; |
3467 | 4.71M | } |