name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
- return (SPLAY_ROOT(head)); \
+ return (SPLAY_ROOT(head)); \
}
/* Main splay operation.
name##_SPLAY_MIN_MAX(struct name *head, int val) \
{ \
name##_SPLAY_MINMAX(head, val); \
- return (SPLAY_ROOT(head)); \
+ return (SPLAY_ROOT(head)); \
}
/* Main splay operation.
/* Mask of user-selectable callback flags. */
-#define EVBUFFER_CB_USER_FLAGS 0xffff
+#define EVBUFFER_CB_USER_FLAGS 0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000
/* Flag set if the callback is using the cb_obsolete function pointer */
-#define EVBUFFER_CB_OBSOLETE 0x00040000
+#define EVBUFFER_CB_OBSOLETE 0x00040000
/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
static inline void
evbuffer_chain_insert(struct evbuffer *buf, struct evbuffer_chain *chain)
{
- ASSERT_EVBUFFER_LOCKED(buf);
+ ASSERT_EVBUFFER_LOCKED(buf);
if (buf->first == NULL) {
buf->first = buf->last = chain;
buf->previous_to_last = NULL;
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
- return -1;
+ return -1;
#else
- if (buf->lock)
- return -1;
-
- if (!lock) {
- EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
- if (!lock)
- return -1;
- buf->lock = lock;
- buf->own_lock = 1;
- } else {
- buf->lock = lock;
- buf->own_lock = 0;
- }
-
- return 0;
+ if (buf->lock)
+ return -1;
+
+ if (!lock) {
+ EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock)
+ return -1;
+ buf->lock = lock;
+ buf->own_lock = 1;
+ } else {
+ buf->lock = lock;
+ buf->own_lock = 0;
+ }
+
+ return 0;
#endif
}
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
struct evbuffer_cb_entry *cbent, *next;
- struct evbuffer_cb_info info;
+ struct evbuffer_cb_info info;
size_t new_size;
ev_uint32_t mask, masked_val;
int clear = 1;
masked_val = EVBUFFER_CB_ENABLED;
}
- ASSERT_EVBUFFER_LOCKED(buffer);
+ ASSERT_EVBUFFER_LOCKED(buffer);
if (TAILQ_EMPTY(&buffer->callbacks)) {
- buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
+ buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
return;
- }
- if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
- return;
-
- new_size = buffer->total_len;
- info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
- info.n_added = buffer->n_add_for_cb;
- info.n_deleted = buffer->n_del_for_cb;
+ }
+ if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
+ return;
+
+ new_size = buffer->total_len;
+ info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
+ info.n_added = buffer->n_add_for_cb;
+ info.n_deleted = buffer->n_del_for_cb;
if (clear) {
buffer->n_add_for_cb = 0;
buffer->n_del_for_cb = 0;
event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);
EVBUFFER_UNLOCK(buffer);
- if (buffer->own_lock)
- EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (buffer->own_lock)
+ EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
mm_free(buffer);
}
void
evbuffer_lock(struct evbuffer *buf)
{
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
}
void
evbuffer_unlock(struct evbuffer *buf)
{
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
}
size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
- size_t result;
+ size_t result;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
result = (buffer->total_len);
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
- return result;
+ return result;
}
size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
struct evbuffer_chain *chain;
- size_t result;
+ size_t result;
- EVBUFFER_LOCK(buf);
- chain = buf->first;
+ EVBUFFER_LOCK(buf);
+ chain = buf->first;
result = (chain != NULL ? chain->off : 0);
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
- return result;
+ return result;
}
int
}
#define ZERO_CHAIN(dst) do { \
- ASSERT_EVBUFFER_LOCKED(dst); \
+ ASSERT_EVBUFFER_LOCKED(dst); \
(dst)->first = NULL; \
(dst)->last = NULL; \
(dst)->previous_to_last = NULL; \
} while (0)
#define COPY_CHAIN(dst, src) do { \
- ASSERT_EVBUFFER_LOCKED(dst); \
- ASSERT_EVBUFFER_LOCKED(src); \
+ ASSERT_EVBUFFER_LOCKED(dst); \
+ ASSERT_EVBUFFER_LOCKED(src); \
(dst)->first = (src)->first; \
(dst)->previous_to_last = (src)->previous_to_last; \
(dst)->last = (src)->last; \
} while (0)
#define APPEND_CHAIN(dst, src) do { \
- ASSERT_EVBUFFER_LOCKED(dst); \
- ASSERT_EVBUFFER_LOCKED(src); \
+ ASSERT_EVBUFFER_LOCKED(dst); \
+ ASSERT_EVBUFFER_LOCKED(src); \
(dst)->last->next = (src)->first; \
(dst)->previous_to_last = (src)->previous_to_last ? \
(src)->previous_to_last : (dst)->last; \
(dst)->total_len += (src)->total_len; \
} while (0)
-#define PREPEND_CHAIN(dst, src) do { \
- ASSERT_EVBUFFER_LOCKED(dst); \
- ASSERT_EVBUFFER_LOCKED(src); \
- (src)->last->next = (dst)->first; \
- (dst)->first = (src)->first; \
- (dst)->total_len += (src)->total_len; \
- if ((dst)->previous_to_last == NULL) \
- (dst)->previous_to_last = (src)->last; \
+#define PREPEND_CHAIN(dst, src) do { \
+ ASSERT_EVBUFFER_LOCKED(dst); \
+ ASSERT_EVBUFFER_LOCKED(src); \
+ (src)->last->next = (dst)->first; \
+ (dst)->first = (src)->first; \
+ (dst)->total_len += (src)->total_len; \
+ if ((dst)->previous_to_last == NULL) \
+ (dst)->previous_to_last = (src)->last; \
} while (0)
size_t in_total_len, out_total_len;
int result = 0;
- EVBUFFER_LOCK2(inbuf, outbuf);
- in_total_len = inbuf->total_len;
+ EVBUFFER_LOCK2(inbuf, outbuf);
+ in_total_len = inbuf->total_len;
out_total_len = outbuf->total_len;
if (in_total_len == 0 || outbuf == inbuf)
/* remove everything from inbuf */
ZERO_CHAIN(inbuf);
- inbuf->n_del_for_cb += in_total_len;
- outbuf->n_add_for_cb += in_total_len;
+ inbuf->n_del_for_cb += in_total_len;
+ outbuf->n_add_for_cb += in_total_len;
evbuffer_invoke_callbacks(inbuf);
evbuffer_invoke_callbacks(outbuf);
done:
- EVBUFFER_UNLOCK2(inbuf, outbuf);
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
return result;
}
size_t in_total_len, out_total_len;
int result = 0;
- EVBUFFER_LOCK2(inbuf, outbuf);
+ EVBUFFER_LOCK2(inbuf, outbuf);
- in_total_len = inbuf->total_len;
+ in_total_len = inbuf->total_len;
out_total_len = outbuf->total_len;
if (!in_total_len || inbuf == outbuf)
/* remove everything from inbuf */
ZERO_CHAIN(inbuf);
- inbuf->n_del_for_cb += in_total_len;
- outbuf->n_add_for_cb += in_total_len;
+ inbuf->n_del_for_cb += in_total_len;
+ outbuf->n_add_for_cb += in_total_len;
evbuffer_invoke_callbacks(inbuf);
evbuffer_invoke_callbacks(outbuf);
done:
- EVBUFFER_UNLOCK2(inbuf, outbuf);
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
return result;
}
evbuffer_drain(struct evbuffer *buf, size_t len)
{
struct evbuffer_chain *chain, *next;
- size_t old_len;
+ size_t old_len;
int result = 0;
- EVBUFFER_LOCK(buf);
- old_len = buf->total_len;
+ EVBUFFER_LOCK(buf);
+ old_len = buf->total_len;
if (old_len == 0)
goto done;
if (len >= old_len && !(buf->last && CHAIN_PINNED_R(buf->last))) {
- len = old_len;
+ len = old_len;
for (chain = buf->first; chain != NULL; chain = next) {
next = chain->next;
chain->off -= len;
}
- buf->n_del_for_cb += len;
+ buf->n_del_for_cb += len;
/* Tell someone about changes in this buffer */
evbuffer_invoke_callbacks(buf);
done:
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
return result;
}
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
- /*XXX fails badly on sendfile case. */
+ /*XXX fails badly on sendfile case. */
struct evbuffer_chain *chain, *tmp;
char *data = data_out;
size_t nread;
- int result = 0;
+ int result = 0;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
- chain = buf->first;
+ chain = buf->first;
if (datlen >= buf->total_len)
datlen = buf->total_len;
if (datlen == 0)
- goto done;
+ goto done;
if (buf->freeze_start) {
result = -1;
buf->total_len -= nread;
- buf->n_del_for_cb += nread;
+ buf->n_del_for_cb += nread;
if (nread)
evbuffer_invoke_callbacks(buf);
result = nread;
done:
- EVBUFFER_UNLOCK(buf);
- return result;
+ EVBUFFER_UNLOCK(buf);
+ return result;
}
/* reads data from the src buffer to the dst buffer, avoids memcpy as
/*XXX can fail badly on sendfile case. */
struct evbuffer_chain *chain, *previous, *previous_to_previous = NULL;
size_t nread = 0;
- int result;
+ int result;
- EVBUFFER_LOCK2(src, dst);
+ EVBUFFER_LOCK2(src, dst);
- chain = previous = src->first;
+ chain = previous = src->first;
if (datlen == 0 || dst == src) {
result = 0;
- goto done;
- }
+ goto done;
+ }
if (dst->freeze_end || src->freeze_start) {
result = -1;
datlen = src->total_len;
evbuffer_add_buffer(dst, src);
result = datlen;
- goto done;
+ goto done;
}
/* removes chains if possible */
src->previous_to_last = NULL;
dst->total_len += nread;
- dst->n_add_for_cb += nread;
+ dst->n_add_for_cb += nread;
}
/* we know that there is more data in the src buffer than
nread += datlen;
src->total_len -= nread;
- src->n_del_for_cb += nread;
+ src->n_del_for_cb += nread;
if (nread) {
evbuffer_invoke_callbacks(dst);
evbuffer_invoke_callbacks(src);
}
- result = nread;
+ result = nread;
done:
- EVBUFFER_UNLOCK2(src, dst);
+ EVBUFFER_UNLOCK2(src, dst);
return result;
}
unsigned char *buffer, *result = NULL;
ev_ssize_t remaining;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
- chain = buf->first;
+ chain = buf->first;
if (size < 0)
size = buf->total_len;
* is going to have a long enough buffer afterwards; so we return
* NULL */
if (size == 0 || (size_t)size > buf->total_len)
- goto done;
+ goto done;
/* No need to pull up anything; the first size bytes are
* already here. */
- if (chain->off >= (size_t)size) {
- result = chain->buffer + chain->misalign;
- goto done;
- }
+ if (chain->off >= (size_t)size) {
+ result = chain->buffer + chain->misalign;
+ goto done;
+ }
/* Make sure that none of the chains we need to copy from is pinned. */
remaining = size - chain->off;
result = (tmp->buffer + tmp->misalign);
done:
- EVBUFFER_UNLOCK(buf);
- return result;
+ EVBUFFER_UNLOCK(buf);
+ return result;
}
/*
enum evbuffer_eol_style eol_style)
{
struct evbuffer_ptr it, it2;
- size_t extra_drain = 0;
+ size_t extra_drain = 0;
int ok = 0;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
if (start) {
memcpy(&it, start, sizeof(it));
struct evbuffer_ptr it;
char *line;
size_t n_to_copy=0, extra_drain=0;
- char *result = NULL;
+ char *result = NULL;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
if (buffer->freeze_start) {
goto done;
line[n_to_copy] = '\0';
evbuffer_drain(buffer, extra_drain);
- result = line;
+ result = line;
done:
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
if (n_read_out)
*n_read_out = result ? n_to_copy : 0;
- return result;
+ return result;
}
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
struct evbuffer_chain *chain, *tmp;
const unsigned char *data = data_in;
size_t remain, to_alloc;
- int result = -1;
+ int result = -1;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
if (buf->freeze_end) {
goto done;
}
- chain = buf->last;
+ chain = buf->last;
/* If there are no chains allocated for this buffer, allocate one
* big enough to hold all the data. */
data, datlen);
chain->off += datlen;
buf->total_len += datlen;
- buf->n_add_for_cb += datlen;
+ buf->n_add_for_cb += datlen;
goto out;
} else if ((size_t)chain->misalign >= datlen && !CHAIN_PINNED(chain)) {
/* we can fit the data into the misalignment */
memcpy(chain->buffer + chain->off, data, datlen);
chain->off += datlen;
buf->total_len += datlen;
- buf->n_add_for_cb += datlen;
+ buf->n_add_for_cb += datlen;
goto out;
}
} else {
data, remain);
chain->off += remain;
buf->total_len += remain;
- buf->n_add_for_cb += remain;
+ buf->n_add_for_cb += remain;
}
data += remain;
out:
evbuffer_invoke_callbacks(buf);
- result = 0;
+ result = 0;
done:
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
return result;
}
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
struct evbuffer_chain *chain, *tmp;
- int result = -1;
+ int result = -1;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
if (buf->freeze_start) {
goto done;
}
- chain = buf->first;
+ chain = buf->first;
if (chain == NULL) {
if (evbuffer_expand(buf, datlen) == -1)
chain->off += datlen;
chain->misalign -= datlen;
buf->total_len += datlen;
- buf->n_add_for_cb += datlen;
+ buf->n_add_for_cb += datlen;
goto out;
} else if (chain->misalign) {
memcpy(chain->buffer,
chain->misalign);
chain->off += chain->misalign;
buf->total_len += chain->misalign;
- buf->n_add_for_cb += chain->misalign;
+ buf->n_add_for_cb += chain->misalign;
datlen -= chain->misalign;
chain->misalign = 0;
}
memcpy(tmp->buffer + tmp->misalign, data, datlen);
buf->total_len += datlen;
- buf->n_add_for_cb += chain->misalign;
+ buf->n_add_for_cb += chain->misalign;
out:
evbuffer_invoke_callbacks(buf);
- result = 0;
+ result = 0;
done:
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
return result;
}
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
/* XXX we should either make this function less costly, or call it
- * less often. */
+ * less often. */
struct evbuffer_chain *chain, *tmp;
size_t need, length;
- int result = -1;
+ int result = -1;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
- chain = buf->last;
+ chain = buf->last;
if (chain == NULL ||
(chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
goto err;
evbuffer_chain_insert(buf, chain);
- goto ok;
+ goto ok;
}
need = chain->misalign + chain->off + datlen;
/* If we can fit all the data, then we don't have to do anything */
if (chain->buffer_len >= need)
- goto ok;
+ goto ok;
/* If the misalignment plus the remaining space fulfills our
* data needs, we just force an alignment to happen.
evbuffer_chain_free(chain);
ok:
- result = 0;
+ result = 0;
err:
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
return result;
}
struct evbuffer_chain *chain = buf->last, *tmp;
size_t avail, avail_in_prev = 0;
- ASSERT_EVBUFFER_LOCKED(buf);
+ ASSERT_EVBUFFER_LOCKED(buf);
if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
chain = evbuffer_chain_new(datlen);
{
struct evbuffer_chain *chain;
int n = EVBUFFER_MAX_READ;
- int result;
+ int result;
#ifdef USE_IOVEC_IMPL
int nvecs;
long lng = n;
#endif
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
chain = buf->last;
/* Since we can use iovecs, we're willing to use the last
* _two_ chains. */
if (_evbuffer_expand_fast(buf, howmuch) == -1) {
- result = -1;
- goto done;
+ result = -1;
+ goto done;
} else {
IOV_TYPE vecs[2];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
* over on buf->last. */
if (evbuffer_expand(buf, howmuch) == -1) {
result = -1;
- goto done;
- }
+ goto done;
+ }
chain = buf->last;
if (n == -1) {
result = -1;
- goto done;
- }
+ goto done;
+ }
if (n == 0) {
result = 0;
- goto done;
- }
+ goto done;
+ }
#ifdef USE_IOVEC_IMPL
if (nvecs == 2) {
chain->off += n;
#endif
buf->total_len += n;
- buf->n_add_for_cb += n;
+ buf->n_add_for_cb += n;
/* Tell someone about changes in this buffer */
evbuffer_invoke_callbacks(buf);
- result = n;
+ result = n;
done:
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
return result;
}
if (howmuch < 0)
return -1;
- ASSERT_EVBUFFER_LOCKED(buffer);
+ ASSERT_EVBUFFER_LOCKED(buffer);
/* XXX make this top out at some maximal data length? if the
* buffer has (say) 1MB in it, split over 128 chains, there's
* no way it all gets written in one go. */
off_t offset = chain->misalign;
#endif
- ASSERT_EVBUFFER_LOCKED(buffer);
+ ASSERT_EVBUFFER_LOCKED(buffer);
#if defined(SENDFILE_IS_MACOSX)
res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
{
int n = -1;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
if (buffer->freeze_start) {
goto done;
#endif
}
- if (n > 0)
- evbuffer_drain(buffer, n);
+ if (n > 0)
+ evbuffer_drain(buffer, n);
done:
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
return (n);
}
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
- unsigned char *search;
- struct evbuffer_ptr ptr;
+ unsigned char *search;
+ struct evbuffer_ptr ptr;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
- ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
- if (ptr.pos < 0) {
- search = NULL;
- } else {
- search = evbuffer_pullup(buffer, ptr.pos + len);
+ ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
+ if (ptr.pos < 0) {
+ search = NULL;
+ } else {
+ search = evbuffer_pullup(buffer, ptr.pos + len);
if (search)
search += ptr.pos;
- }
- EVBUFFER_UNLOCK(buffer);
- return search;
+ }
+ EVBUFFER_UNLOCK(buffer);
+ return search;
}
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
size_t position, enum evbuffer_ptr_how how)
{
- size_t left = position;
+ size_t left = position;
struct evbuffer_chain *chain = NULL;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
switch (how) {
case EVBUFFER_PTR_SET:
pos->pos = -1;
}
- EVBUFFER_UNLOCK(buf);
+ EVBUFFER_UNLOCK(buf);
return chain != NULL ? 0 : -1;
}
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
const char *mem, size_t len)
{
- struct evbuffer_chain *chain;
- size_t position;
- int r;
+ struct evbuffer_chain *chain;
+ size_t position;
+ int r;
- ASSERT_EVBUFFER_LOCKED(buf);
+ ASSERT_EVBUFFER_LOCKED(buf);
- if (pos->pos + len > buf->total_len)
- return -1;
+ if (pos->pos + len > buf->total_len)
+ return -1;
- chain = pos->_internal.chain;
- position = pos->_internal.pos_in_chain;
- while (len && chain) {
- size_t n_comparable;
- if (len + position > chain->off)
- n_comparable = chain->off - position;
- else
- n_comparable = len;
- r = memcmp(chain->buffer + chain->misalign + position, mem,
- n_comparable);
- if (r)
- return r;
- mem += n_comparable;
- len -= n_comparable;
- position = 0;
- chain = chain->next;
- }
+ chain = pos->_internal.chain;
+ position = pos->_internal.pos_in_chain;
+ while (len && chain) {
+ size_t n_comparable;
+ if (len + position > chain->off)
+ n_comparable = chain->off - position;
+ else
+ n_comparable = len;
+ r = memcmp(chain->buffer + chain->misalign + position, mem,
+ n_comparable);
+ if (r)
+ return r;
+ mem += n_comparable;
+ len -= n_comparable;
+ position = 0;
+ chain = chain->next;
+ }
- return 0;
+ return 0;
}
struct evbuffer_ptr
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
- struct evbuffer_ptr pos;
- struct evbuffer_chain *chain, *last_chain = NULL;
+ struct evbuffer_ptr pos;
+ struct evbuffer_chain *chain, *last_chain = NULL;
const unsigned char *p;
- char first;
+ char first;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
- if (start) {
- memcpy(&pos, start, sizeof(pos));
- chain = pos._internal.chain;
- } else {
- pos.pos = 0;
- chain = pos._internal.chain = buffer->first;
- pos._internal.pos_in_chain = 0;
- }
+ if (start) {
+ memcpy(&pos, start, sizeof(pos));
+ chain = pos._internal.chain;
+ } else {
+ pos.pos = 0;
+ chain = pos._internal.chain = buffer->first;
+ pos._internal.pos_in_chain = 0;
+ }
if (end)
last_chain = end->_internal.chain;
- if (!len)
- goto done;
-
- first = what[0];
-
- while (chain) {
- const unsigned char *start_at =
- chain->buffer + chain->misalign +
- pos._internal.pos_in_chain;
- p = memchr(start_at, first,
- chain->off - pos._internal.pos_in_chain);
- if (p) {
- pos.pos += p - start_at;
- pos._internal.pos_in_chain += p - start_at;
- if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
+ if (!len)
+ goto done;
+
+ first = what[0];
+
+ while (chain) {
+ const unsigned char *start_at =
+ chain->buffer + chain->misalign +
+ pos._internal.pos_in_chain;
+ p = memchr(start_at, first,
+ chain->off - pos._internal.pos_in_chain);
+ if (p) {
+ pos.pos += p - start_at;
+ pos._internal.pos_in_chain += p - start_at;
+ if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
if (end && pos.pos + len > end->pos)
goto not_found;
else
goto done;
}
- ++pos.pos;
- ++pos._internal.pos_in_chain;
- if (pos._internal.pos_in_chain == chain->off) {
- chain = pos._internal.chain = chain->next;
- pos._internal.pos_in_chain = 0;
- }
- } else {
+ ++pos.pos;
+ ++pos._internal.pos_in_chain;
+ if (pos._internal.pos_in_chain == chain->off) {
+ chain = pos._internal.chain = chain->next;
+ pos._internal.pos_in_chain = 0;
+ }
+ } else {
if (chain == last_chain)
goto not_found;
- pos.pos += chain->off - pos._internal.pos_in_chain;
- chain = pos._internal.chain = chain->next;
- pos._internal.pos_in_chain = 0;
- }
- }
+ pos.pos += chain->off - pos._internal.pos_in_chain;
+ chain = pos._internal.chain = chain->next;
+ pos._internal.pos_in_chain = 0;
+ }
+ }
not_found:
- pos.pos = -1;
- pos._internal.chain = NULL;
+ pos.pos = -1;
+ pos._internal.chain = NULL;
done:
- EVBUFFER_UNLOCK(buffer);
- return pos;
+ EVBUFFER_UNLOCK(buffer);
+ return pos;
}
int
int sz, result = -1;
va_list aq;
- EVBUFFER_LOCK(buf);
+ EVBUFFER_LOCK(buf);
if (buf->freeze_end) {
goto done;
if ((size_t)sz < space) {
chain->off += sz;
buf->total_len += sz;
- buf->n_add_for_cb += sz;
+ buf->n_add_for_cb += sz;
evbuffer_invoke_callbacks(buf);
result = sz;
- goto done;
+ goto done;
}
if (evbuffer_expand(buf, sz + 1) == -1)
- goto done;
- }
+ goto done;
+ }
/* NOTREACHED */
done:
- EVBUFFER_UNLOCK(buf);
- return result;
+ EVBUFFER_UNLOCK(buf);
+ return result;
}
int
info->cleanupfn = cleanupfn;
info->extra = extra;
- EVBUFFER_LOCK(outbuf);
+ EVBUFFER_LOCK(outbuf);
if (outbuf->freeze_end) {
/* don't call chain_free; we do not want to actually invoke
* the cleanup function */
goto done;
}
evbuffer_chain_insert(outbuf, chain);
- outbuf->n_add_for_cb += datlen;
+ outbuf->n_add_for_cb += datlen;
evbuffer_invoke_callbacks(outbuf);
result = 0;
done:
- EVBUFFER_UNLOCK(outbuf);
+ EVBUFFER_UNLOCK(outbuf);
return result;
}
info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
info->fd = fd;
- EVBUFFER_LOCK(outbuf);
+ EVBUFFER_LOCK(outbuf);
if (outbuf->freeze_end) {
mm_free(chain);
ok = 0;
info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
info->fd = fd;
- EVBUFFER_LOCK(outbuf);
+ EVBUFFER_LOCK(outbuf);
if (outbuf->freeze_end) {
info->fd = -1;
evbuffer_chain_free(chain);
length -= read;
}
- EVBUFFER_LOCK(outbuf);
+ EVBUFFER_LOCK(outbuf);
if (outbuf->freeze_end) {
evbuffer_free(tmp);
ok = 0;
if (ok)
evbuffer_invoke_callbacks(outbuf);
- EVBUFFER_UNLOCK(outbuf);
+ EVBUFFER_UNLOCK(outbuf);
return ok ? 0 : -1;
}
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
if (!TAILQ_EMPTY(&buffer->callbacks))
evbuffer_remove_all_callbacks(buffer);
if (cb) {
- struct evbuffer_cb_entry *ent =
- evbuffer_add_cb(buffer, NULL, cbarg);
- ent->cb.cb_obsolete = cb;
- ent->flags |= EVBUFFER_CB_OBSOLETE;
- }
- EVBUFFER_UNLOCK(buffer);
+ struct evbuffer_cb_entry *ent =
+ evbuffer_add_cb(buffer, NULL, cbarg);
+ ent->cb.cb_obsolete = cb;
+ ent->flags |= EVBUFFER_CB_OBSOLETE;
+ }
+ EVBUFFER_UNLOCK(buffer);
}
struct evbuffer_cb_entry *
struct evbuffer_cb_entry *e;
if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
return NULL;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
e->cb.cb_func = cb;
e->cbarg = cbarg;
e->flags = EVBUFFER_CB_ENABLED;
TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
return e;
}
evbuffer_remove_cb_entry(struct evbuffer *buffer,
struct evbuffer_cb_entry *ent)
{
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
TAILQ_REMOVE(&buffer->callbacks, ent, next);
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
mm_free(ent);
return 0;
}
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
struct evbuffer_cb_entry *cbent;
- int result = -1;
- EVBUFFER_LOCK(buffer);
+ int result = -1;
+ EVBUFFER_LOCK(buffer);
TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
result = evbuffer_remove_cb_entry(buffer, cbent);
- goto done;
+ goto done;
}
}
done:
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
return result;
}
{
/* the user isn't allowed to mess with these. */
flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
cb->flags |= flags;
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
return 0;
}
{
/* the user isn't allowed to mess with these. */
flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
- EVBUFFER_LOCK(buffer);
+ EVBUFFER_LOCK(buffer);
cb->flags &= ~flags;
- EVBUFFER_UNLOCK(buffer);
+ EVBUFFER_UNLOCK(buffer);
return 0;
}
_evbuffer_incref(buf);
if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
- &ol->overlapped, NULL)) {
+ &ol->overlapped, NULL)) {
int error = WSAGetLastError();
if (error != WSA_IO_PENDING) {
/* An actual error. */
/** Called when the timeouts on the bufferevent have changed.*/
int (*adj_timeouts)(struct bufferevent *);
- /** Called to flush data. */
- int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
+ /** Called to flush data. */
+ int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
/** Called to access miscellaneous fields. */
int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
void *arg)
{
struct bufferevent *bufev = arg;
- size_t size;
+ size_t size;
size = evbuffer_get_length(buf);
return bufev->output;
}
-/*
- * Returns 0 on success;
- * -1 on failure.
- */
-
int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
{
int r = -1;
BEV_LOCK(bufev);
- if (bufev->be_ops->flush)
- r = bufev->be_ops->flush(bufev, iotype, mode);
+ if (bufev->be_ops->flush)
+ r = bufev->be_ops->flush(bufev, iotype, mode);
BEV_UNLOCK(bufev);
return r;
}
be_async_disable,
be_async_destruct,
_bufferevent_generic_adj_timeouts,
- be_async_flush,
- be_async_ctrl,
+ be_async_flush,
+ be_async_ctrl,
};
struct bufferevent_async {
struct bufferevent_filtered {
struct bufferevent_private bev;
- /** The bufferevent that we read/write filtered data from/to. */
+ /** The bufferevent that we read/write filtered data from/to. */
struct bufferevent *underlying;
- /** A callback on our outbuf to notice when somebody adds data */
+ /** A callback on our outbuf to notice when somebody adds data */
struct evbuffer_cb_entry *outbuf_cb;
- /** True iff we have received an EOF callback from the underlying
- * bufferevent. */
+ /** True iff we have received an EOF callback from the underlying
+ * bufferevent. */
unsigned got_eof;
- /** Function to free context when we're done. */
+ /** Function to free context when we're done. */
void (*free_context)(void *);
- /** Input filter */
+ /** Input filter */
bufferevent_filter_cb process_in;
- /** Output filter */
+ /** Output filter */
bufferevent_filter_cb process_out;
-
- /** User-supplied argument to the filters. */
+ /** User-supplied argument to the filters. */
void *context;
};
be_filter_disable,
be_filter_destruct,
_bufferevent_generic_adj_timeouts,
- be_filter_flush,
+ be_filter_flush,
be_filter_ctrl,
};
be_underlying_writebuf_full(struct bufferevent_filtered *bevf,
enum bufferevent_flush_mode state)
{
- struct bufferevent *u = bevf->underlying;
- return state == BEV_NORMAL &&
- u->wm_write.high &&
- evbuffer_get_length(u->output) >= u->wm_write.high;
+ struct bufferevent *u = bevf->underlying;
+ return state == BEV_NORMAL &&
+ u->wm_write.high &&
+ evbuffer_get_length(u->output) >= u->wm_write.high;
}
/** Return 1 if our input buffer is at or over its high watermark such that we
be_readbuf_full(struct bufferevent_filtered *bevf,
enum bufferevent_flush_mode state)
{
- struct bufferevent *bufev = downcast(bevf);
- return state == BEV_NORMAL &&
- bufev->wm_read.high &&
- evbuffer_get_length(bufev->input) >= bufev->wm_read.high;
+ struct bufferevent *bufev = downcast(bevf);
+ return state == BEV_NORMAL &&
+ bufev->wm_read.high &&
+ evbuffer_get_length(bufev->input) >= bufev->wm_read.high;
}
bufev_f->context = ctx;
bufferevent_setcb(bufev_f->underlying,
- be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f);
+ be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f);
bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
bufferevent_filtered_outbuf_cb, bufev_f);
enum bufferevent_filter_result res;
struct bufferevent *bev = downcast(bevf);
- if (state == BEV_NORMAL) {
- /* If we're in 'normal' mode, don't urge data on the filter
- * unless we're reading data and under our high-water mark.*/
- if (!(bev->enabled & EV_READ) ||
- be_readbuf_full(bevf, state))
- return BEV_OK;
- }
+ if (state == BEV_NORMAL) {
+ /* If we're in 'normal' mode, don't urge data on the filter
+ * unless we're reading data and under our high-water mark.*/
+ if (!(bev->enabled & EV_READ) ||
+ be_readbuf_full(bevf, state))
+ return BEV_OK;
+ }
do {
- ev_ssize_t limit = -1;
- if (state == BEV_NORMAL && bev->wm_read.high)
- limit = bev->wm_read.high -
- evbuffer_get_length(bev->input);
+ ev_ssize_t limit = -1;
+ if (state == BEV_NORMAL && bev->wm_read.high)
+ limit = bev->wm_read.high -
+ evbuffer_get_length(bev->input);
res = bevf->process_in(bevf->underlying->input,
- bev->input, limit, state, bevf->context);
+ bev->input, limit, state, bevf->context);
if (res == BEV_OK)
*processed_out = 1;
} while (res == BEV_OK &&
(bev->enabled & EV_READ) &&
evbuffer_get_length(bevf->underlying->input) &&
- !be_readbuf_full(bevf, state));
+ !be_readbuf_full(bevf, state));
if (*processed_out)
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
{
/* Requires references and lock: might call writecb */
enum bufferevent_filter_result res = BEV_OK;
- struct bufferevent *bufev = downcast(bevf);
- int again = 0;
-
- if (state == BEV_NORMAL) {
- /* If we're in 'normal' mode, don't urge data on the
- * filter unless we're writing data, and the underlying
- * bufferevent is accepting data, and we have data to
- * give the filter. If we're in 'flush' or 'finish',
- * call the filter no matter what. */
- if (!(bufev->enabled & EV_WRITE) ||
- be_underlying_writebuf_full(bevf, state) ||
- !evbuffer_get_length(bufev->output))
- return BEV_OK;
- }
-
- /* disable the callback that calls this function
- when the user adds to the output buffer. */
- evbuffer_cb_set_flags(bufev->output, bevf->outbuf_cb, 0);
-
- do {
- int processed = 0;
- again = 0;
-
- do {
- ev_ssize_t limit = -1;
- if (state == BEV_NORMAL &&
- bevf->underlying->wm_write.high)
- limit = bevf->underlying->wm_write.high -
- evbuffer_get_length(bevf->underlying->output);
-
- res = bevf->process_out(downcast(bevf)->output,
- bevf->underlying->output,
- limit,
- state,
- bevf->context);
-
- if (res == BEV_OK)
- processed = *processed_out = 1;
- } while (/* Stop if the filter wasn't successful...*/
- res == BEV_OK &&
- /* Or if we aren't writing any more. */
- (bufev->enabled & EV_WRITE) &&
- /* Of if we have nothing more to write and we are
- * not flushing. */
- evbuffer_get_length(bufev->output) &&
- /* Or if we have filled the underlying output buffer. */
- !be_underlying_writebuf_full(bevf,state));
-
- if (processed &&
- evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
- /* call the write callback.*/
- _bufferevent_run_writecb(bufev);
-
- if (res == BEV_OK &&
- (bufev->enabled & EV_WRITE) &&
- evbuffer_get_length(bufev->output) &&
- !be_underlying_writebuf_full(bevf, state)) {
- again = 1;
- }
- }
- } while (again);
-
- /* reenable the outbuf_cb */
- evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb,
- EVBUFFER_CB_ENABLED);
+ struct bufferevent *bufev = downcast(bevf);
+ int again = 0;
+
+ if (state == BEV_NORMAL) {
+ /* If we're in 'normal' mode, don't urge data on the
+ * filter unless we're writing data, and the underlying
+ * bufferevent is accepting data, and we have data to
+ * give the filter. If we're in 'flush' or 'finish',
+ * call the filter no matter what. */
+ if (!(bufev->enabled & EV_WRITE) ||
+ be_underlying_writebuf_full(bevf, state) ||
+ !evbuffer_get_length(bufev->output))
+ return BEV_OK;
+ }
+
+ /* disable the callback that calls this function
+ when the user adds to the output buffer. */
+ evbuffer_cb_set_flags(bufev->output, bevf->outbuf_cb, 0);
+
+ do {
+ int processed = 0;
+ again = 0;
+
+ do {
+ ev_ssize_t limit = -1;
+ if (state == BEV_NORMAL &&
+ bevf->underlying->wm_write.high)
+ limit = bevf->underlying->wm_write.high -
+ evbuffer_get_length(bevf->underlying->output);
+
+ res = bevf->process_out(downcast(bevf)->output,
+ bevf->underlying->output,
+ limit,
+ state,
+ bevf->context);
+
+ if (res == BEV_OK)
+ processed = *processed_out = 1;
+ } while (/* Stop if the filter wasn't successful...*/
+ res == BEV_OK &&
+ /* Or if we aren't writing any more. */
+ (bufev->enabled & EV_WRITE) &&
+ /* Of if we have nothing more to write and we are
+ * not flushing. */
+ evbuffer_get_length(bufev->output) &&
+ /* Or if we have filled the underlying output buffer. */
+ !be_underlying_writebuf_full(bevf,state));
+
+ if (processed &&
+ evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
+ /* call the write callback.*/
+ _bufferevent_run_writecb(bufev);
+
+ if (res == BEV_OK &&
+ (bufev->enabled & EV_WRITE) &&
+ evbuffer_get_length(bufev->output) &&
+ !be_underlying_writebuf_full(bevf, state)) {
+ again = 1;
+ }
+ }
+ } while (again);
+
+ /* reenable the outbuf_cb */
+ evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb,
+ EVBUFFER_CB_ENABLED);
if (*processed_out)
BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
/* Somebody added more data to the output buffer. Try to
* process it, if we should. */
_bufferevent_incref_and_lock(bev);
- be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
+ be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
_bufferevent_decref_and_unlock(bev);
}
}
{
struct bufferevent_filtered *bevf = _me;
struct bufferevent *bev = downcast(bevf);
- int processed_any = 0;
+ int processed_any = 0;
_bufferevent_incref_and_lock(bev);
- be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
+ be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
_bufferevent_decref_and_unlock(bev);
}
if (iotype & EV_WRITE) {
be_filter_process_output(bevf, mode, &processed_any);
}
- /* XXX check the return value? */
- /* XXX does this want to recursively call lower-level flushes? */
- bufferevent_flush(bevf->underlying, iotype, mode);
+ /* XXX check the return value? */
+ /* XXX does this want to recursively call lower-level flushes? */
+ bufferevent_flush(bevf->underlying, iotype, mode);
_bufferevent_decref_and_unlock(bufev);
be_openssl_disable,
be_openssl_destruct,
be_openssl_adj_timeouts,
- be_openssl_flush,
+ be_openssl_flush,
be_openssl_ctrl,
};
(! wm->high || evbuffer_get_length(input) < wm->high)) {
int n_to_read =
wm->high ? wm->high - evbuffer_get_length(input)
- : READ_DEFAULT;
+ : READ_DEFAULT;
r = do_read(bev_ssl, n_to_read);
if (r <= 0)
break;
static void
be_openssl_outbuf_cb(struct evbuffer *buf,
- const struct evbuffer_cb_info *cbinfo, void *arg)
+ const struct evbuffer_cb_info *cbinfo, void *arg)
{
struct bufferevent_openssl *bev_ssl = arg;
int r = 0;
bufferevent_pair_new(struct event_base *base, int options,
struct bufferevent *pair[2])
{
- struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL;
+ struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL;
int tmp_options;
options |= BEV_OPT_DEFER_CALLBACKS;
return 0;
/* Naively, we would say
- bucket->limit += n_ticks * cfg->rate;
+ bucket->limit += n_ticks * cfg->rate;
- if (bucket->limit > cfg->maximum)
- bucket->limit = cfg->maximum;
+ if (bucket->limit > cfg->maximum)
+ bucket->limit = cfg->maximum;
But we're worried about overflow, so we do it like this:
*/
/* Initialize the kernel queue */
if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
- event_warn("open: /dev/poll");
+ event_warn("open: /dev/poll");
mm_free(devpollop);
return (NULL);
}
int which = 0;
int what = events[i].revents;
- if (what & POLLHUP)
- what |= POLLIN | POLLOUT;
- else if (what & POLLERR)
- what |= POLLIN | POLLOUT;
+ if (what & POLLHUP)
+ what |= POLLIN | POLLOUT;
+ else if (what & POLLERR)
+ what |= POLLIN | POLLOUT;
if (what & POLLIN)
which |= EV_READ;
/** A single evbuffer callback for an evbuffer. This function will be invoked
* when bytes are added to or removed from the evbuffer. */
struct evbuffer_cb_entry {
- /** Structures to implement a doubly-linked queue of callbacks */
+ /** Structures to implement a doubly-linked queue of callbacks */
TAILQ_ENTRY(evbuffer_cb_entry) next;
- /** The callback function to invoke when this callback is called.
+ /** The callback function to invoke when this callback is called.
If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
valid; otherwise, cb_func is valid. */
- union {
- evbuffer_cb_func cb_func;
- evbuffer_cb cb_obsolete;
- } cb;
- /** Argument to pass to cb. */
+ union {
+ evbuffer_cb_func cb_func;
+ evbuffer_cb cb_obsolete;
+ } cb;
+ /** Argument to pass to cb. */
void *cbarg;
- /** Currently set flags on this callback. */
+ /** Currently set flags on this callback. */
ev_uint32_t flags;
};
/** Number of bytes we have added to the buffer since we last tried to
* invoke callbacks. */
- size_t n_add_for_cb;
+ size_t n_add_for_cb;
/** Number of bytes we have removed from the buffer since we last
* tried to invoke callbacks. */
- size_t n_del_for_cb;
+ size_t n_del_for_cb;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
/** A lock used to mediate access to this buffer. */
- void *lock;
+ void *lock;
#endif
/** True iff we should free the lock field when we free this
* evbuffer. */
unsigned freeze_end : 1;
/** True iff this evbuffer's callbacks are not invoked immediately
* upon a change in the buffer, but instead are deferred to be invoked
- * from the event_base's loop. Useful for preventing enormous stack
+ * from the event_base's loop. Useful for preventing enormous stack
* overflows when we have mutually recursive callbacks, and for
* serializing callbacks in a single thread. */
unsigned deferred_cbs : 1;
/** Used to implement deferred callbacks. */
struct deferred_cb_queue *cb_queue;
- /** A reference count on this evbuffer. When the reference count
- * reaches 0, the buffer is destroyed. Manipulated with
+ /** A reference count on this evbuffer. When the reference count
+ * reaches 0, the buffer is destroyed. Manipulated with
* evbuffer_incref and evbuffer_decref_and_unlock and
* evbuffer_free. */
int refcnt;
/** Set if special handling is required for this chain */
unsigned flags;
-#define EVBUFFER_MMAP 0x0001 /**< memory in buffer is mmaped */
-#define EVBUFFER_SENDFILE 0x0002 /**< a chain used for sendfile */
+#define EVBUFFER_MMAP 0x0001 /**< memory in buffer is mmaped */
+#define EVBUFFER_SENDFILE 0x0002 /**< a chain used for sendfile */
#define EVBUFFER_REFERENCE 0x0004 /**< a chain with a mem reference */
-#define EVBUFFER_IMMUTABLE 0x0008 /**< read-only chain */
+#define EVBUFFER_IMMUTABLE 0x0008 /**< read-only chain */
/** a chain that mustn't be reallocated or freed, or have its contents
* memmoved, until the chain is un-pinned. */
#define EVBUFFER_MEM_PINNED_R 0x0010
#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)
/** Assert that we are holding the lock on an evbuffer */
-#define ASSERT_EVBUFFER_LOCKED(buffer) \
+#define ASSERT_EVBUFFER_LOCKED(buffer) \
EVLOCK_ASSERT_LOCKED((buffer)->lock)
#define EVBUFFER_LOCK(buffer) \
static void
_evdns_log(int warn, const char *fmt, ...)
{
- va_list args;
- static char buf[512];
- if (!evdns_log_fn)
- return;
- va_start(args,fmt);
- evutil_vsnprintf(buf, sizeof(buf), fmt, args);
- evdns_log_fn(warn, buf);
- va_end(args);
+ va_list args;
+ static char buf[512];
+ if (!evdns_log_fn)
+ return;
+ va_start(args,fmt);
+ evutil_vsnprintf(buf, sizeof(buf), fmt, args);
+ evdns_log_fn(warn, buf);
+ va_end(args);
}
#define log _evdns_log
ns->socket, EV_READ | (waiting ? EV_WRITE : 0) | EV_PERSIST,
nameserver_ready_callback, ns);
if (event_add(&ns->event, NULL) < 0) {
- log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s",
- debug_ntop((struct sockaddr *)&ns->address));
- /* ???? Do more? */
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s",
+ debug_ntop((struct sockaddr *)&ns->address));
+ /* ???? Do more? */
}
}
int r;
ASSERT_LOCKED(req->base);
r = sendto(server->socket, req->request, req->request_len, 0,
- (struct sockaddr *)&server->address, server->addrlen);
+ (struct sockaddr *)&server->address, server->addrlen);
if (r < 0) {
int err = evutil_socket_geterror(server->socket);
if (EVUTIL_ERR_RW_RETRIABLE(err))
static inline int
str_matches_option(const char *s1, const char *optionname)
{
- /* Option names are given as "option:" We accept either 'option' in
+ /* Option names are given as "option:" We accept either 'option' in
* s1, or 'option:randomjunk'. The latter form is to implement the
* resolv.conf parser. */
size_t optlen = strlen(optionname);
struct event_iocp_port *iocp;
#endif
- enum event_base_config_flag flags;
+ enum event_base_config_flag flags;
/* Notify main thread to wake up break, etc. */
int th_notify_fd[2];
TAILQ_HEAD(event_configq, event_config_entry) entries;
enum event_method_feature require_features;
- enum event_base_config_flag flags;
+ enum event_base_config_flag flags;
};
/* Internal use only: Functions that might be missing from <sys/queue.h> */
int
event_config_require_features(struct event_config *cfg,
- int features)
+ int features)
{
if (!cfg)
return (-1);
int
event_priority_init(int npriorities)
{
- return event_base_priority_init(current_base, npriorities);
+ return event_base_priority_init(current_base, npriorities);
}
int
int
event_base_dispatch(struct event_base *event_base)
{
- return (event_base_loop(event_base, 0));
+ return (event_base_loop(event_base, 0));
}
const char *
int
event_once(evutil_socket_t fd, short events,
void (*callback)(evutil_socket_t, short, void *),
- void *arg, const struct timeval *tv)
+ void *arg, const struct timeval *tv)
{
return event_base_once(current_base, fd, events, callback, arg, tv);
}
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
void (*callback)(evutil_socket_t, short, void *),
- void *arg, const struct timeval *tv)
+ void *arg, const struct timeval *tv)
{
struct event_once *eonce;
struct timeval etv;
TAILQ_ENTRY(evrpc_request_wrapper) next;
- /* pool on which this rpc request is being made */
- struct evrpc_pool *pool;
+ /* pool on which this rpc request is being made */
+ struct evrpc_pool *pool;
- /* connection on which the request is being sent */
+ /* connection on which the request is being sent */
struct evhttp_connection *evcon;
- /* the actual request */
+ /* the actual request */
struct evhttp_request *req;
/* event for implementing request timeouts */
mm_free((char *)rpc->uri);
mm_free(rpc);
- registered_uri = evrpc_construct_uri(name);
+ registered_uri = evrpc_construct_uri(name);
/* remove the http server callback */
EVUTIL_ASSERT(evhttp_del_cb(base->http_server, registered_uri) == 0);
void *_lock2_tmplock = (lock2); \
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
EVLOCK_LOCK(_lock1_tmplock,mode1); \
- if (_lock2_tmplock != _lock1_tmplock) \
- EVLOCK_LOCK(_lock2_tmplock,mode2); \
+ if (_lock2_tmplock != _lock1_tmplock) \
+ EVLOCK_LOCK(_lock2_tmplock,mode2); \
} while (0)
/** Release both lock1 and lock2. */
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
- if (_lock2_tmplock != _lock1_tmplock) \
- EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
+ if (_lock2_tmplock != _lock1_tmplock) \
+ EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
} while (0)
{
struct evthread_lock_callbacks *target =
_evthread_lock_debugging_enabled
- ? &_original_lock_fns : &_evthread_lock_fns;
+ ? &_original_lock_fns : &_evthread_lock_fns;
if (!cbs) {
memset(target, 0, sizeof(_evthread_lock_fns));
int
evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
{
- int r;
- if (!buflen)
- return 0;
+ int r;
+ if (!buflen)
+ return 0;
#ifdef _MSC_VER
r = _vsnprintf(buf, buflen, format, ap);
if (r < 0)
const ev_uint32_t a = ntohl(in->s_addr);
int r;
r = evutil_snprintf(dst, len, "%d.%d.%d.%d",
- (int)(ev_uint8_t)((a>>24)&0xff),
- (int)(ev_uint8_t)((a>>16)&0xff),
- (int)(ev_uint8_t)((a>>8 )&0xff),
- (int)(ev_uint8_t)((a )&0xff));
+ (int)(ev_uint8_t)((a>>24)&0xff),
+ (int)(ev_uint8_t)((a>>16)&0xff),
+ (int)(ev_uint8_t)((a>>8 )&0xff),
+ (int)(ev_uint8_t)((a )&0xff));
if (r<0||(size_t)r>=len)
return NULL;
else
ev_uint16_t words[8];
for (i = 0; i < 8; ++i) {
words[i] =
- (((ev_uint16_t)addr->s6_addr[2*i])<<8) + addr->s6_addr[2*i+1];
+ (((ev_uint16_t)addr->s6_addr[2*i])<<8) + addr->s6_addr[2*i+1];
}
if (words[0] == 0 && words[1] == 0 && words[2] == 0 && words[3] == 0 &&
- words[4] == 0 && ((words[5] == 0 && words[6] && words[7]) ||
- (words[5] == 0xffff))) {
+ words[4] == 0 && ((words[5] == 0 && words[6] && words[7]) ||
+ (words[5] == 0xffff))) {
/* This is an IPv4 address. */
if (words[5] == 0) {
evutil_snprintf(buf, sizeof(buf), "::%d.%d.%d.%d",
- addr->s6_addr[12], addr->s6_addr[13],
- addr->s6_addr[14], addr->s6_addr[15]);
+ addr->s6_addr[12], addr->s6_addr[13],
+ addr->s6_addr[14], addr->s6_addr[15]);
} else {
evutil_snprintf(buf, sizeof(buf), "::%x:%d.%d.%d.%d", words[5],
- addr->s6_addr[12], addr->s6_addr[13],
- addr->s6_addr[14], addr->s6_addr[15]);
+ addr->s6_addr[12], addr->s6_addr[13],
+ addr->s6_addr[14], addr->s6_addr[15]);
}
if (strlen(buf) > len)
return NULL;
return -1;
memset(out, 0, *outlen);
memcpy(out, &sin6, sizeof(sin6));
- *outlen = sizeof(sin6);
+ *outlen = sizeof(sin6);
return 0;
}
else
return -1;
memset(out, 0, *outlen);
memcpy(out, &sin, sizeof(sin));
- *outlen = sizeof(sin);
+ *outlen = sizeof(sin);
return 0;
}
}
_evutil_weakrand(void)
{
#ifdef WIN32
- return rand();
+ return rand();
#else
- return random();
+ return random();
#endif
}
TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
- struct evconq connections;
+ struct evconq connections;
TAILQ_HEAD(vhostsq, evhttp) virtualhosts;
/* NULL if this server is not a vhost */
- char *vhost_pattern;
+ char *vhost_pattern;
int timeout;
fake_getnameinfo(const struct sockaddr *sa, size_t salen, char *host,
size_t hostlen, char *serv, size_t servlen, int flags)
{
- struct sockaddr_in *sin = (struct sockaddr_in *)sa;
-
- if (serv != NULL) {
- char tmpserv[16];
- evutil_snprintf(tmpserv, sizeof(tmpserv),
- "%d", ntohs(sin->sin_port));
- if (strlcpy(serv, tmpserv, servlen) >= servlen)
- return (-1);
- }
-
- if (host != NULL) {
- if (flags & NI_NUMERICHOST) {
- if (strlcpy(host, inet_ntoa(sin->sin_addr),
- hostlen) >= hostlen)
- return (-1);
- else
- return (0);
- } else {
- struct hostent *hp;
- hp = gethostbyaddr((char *)&sin->sin_addr,
- sizeof(struct in_addr), AF_INET);
- if (hp == NULL)
- return (-2);
-
- if (strlcpy(host, hp->h_name, hostlen) >= hostlen)
- return (-1);
- else
- return (0);
- }
- }
- return (0);
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if (serv != NULL) {
+ char tmpserv[16];
+ evutil_snprintf(tmpserv, sizeof(tmpserv),
+ "%d", ntohs(sin->sin_port));
+ if (strlcpy(serv, tmpserv, servlen) >= servlen)
+ return (-1);
+ }
+
+ if (host != NULL) {
+ if (flags & NI_NUMERICHOST) {
+ if (strlcpy(host, inet_ntoa(sin->sin_addr),
+ hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ } else {
+ struct hostent *hp;
+ hp = gethostbyaddr((char *)&sin->sin_addr,
+ sizeof(struct in_addr), AF_INET);
+ if (hp == NULL)
+ return (-2);
+
+ if (strlcpy(host, hp->h_name, hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ }
+ }
+ return (0);
}
#endif
/**
* Determines if a response should have a body.
* Follows the rules in RFC 2616 section 4.3.
- * @return 1 if the response MUST have a body;
- * 0 if the response MUST NOT have a body.
+ * @return 1 if the response MUST have a body; 0 if the response MUST NOT have
+ * a body.
*/
static int
evhttp_response_needs_body(struct evhttp_request *req)
if (con_outgoing) {
/* idle or close the connection */
- int need_close;
+ int need_close;
TAILQ_REMOVE(&evcon->requests, req, next);
req->evcon = NULL;
} else if (strcmp(method, "HEAD") == 0) {
req->type = EVHTTP_REQ_HEAD;
} else if (strcmp(method, "PUT") == 0) {
- req->type = EVHTTP_REQ_PUT;
+ req->type = EVHTTP_REQ_PUT;
} else if (strcmp(method, "DELETE") == 0) {
- req->type = EVHTTP_REQ_DELETE;
+ req->type = EVHTTP_REQ_DELETE;
} else {
event_debug(("%s: bad method %s on request %p from %s",
__func__, method, req, req->remote_host));
evcon->max_headers_size = http->default_max_headers_size;
evcon->max_body_size = http->default_max_body_size;
-
+
evcon->flags |= EVHTTP_CON_INCOMING;
evcon->state = EVCON_READING_FIRSTLINE;
static evutil_socket_t
bind_socket_ai(struct evutil_addrinfo *ai, int reuse)
{
- evutil_socket_t fd;
+ evutil_socket_t fd;
int on = 1, r;
int serrno;
- /* Create listen socket */
- fd = socket(ai ? ai->ai_family : AF_INET, SOCK_STREAM, 0);
- if (fd == -1) {
+ /* Create listen socket */
+ fd = socket(ai ? ai->ai_family : AF_INET, SOCK_STREAM, 0);
+ if (fd == -1) {
event_sock_warn(-1, "socket");
return (-1);
- }
+ }
if (evutil_make_socket_nonblocking(fd) < 0)
goto out;
if (evutil_make_socket_closeonexec(fd) < 0)
goto out;
- setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on));
+ setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on));
if (reuse)
- evutil_make_listen_socket_reuseable(fd);
+ evutil_make_listen_socket_reuseable(fd);
if (ai != NULL) {
r = bind(fd, ai->ai_addr, ai->ai_addrlen);
static struct evutil_addrinfo *
make_addrinfo(const char *address, ev_uint16_t port)
{
- struct evutil_addrinfo *ai = NULL;
+ struct evutil_addrinfo *ai = NULL;
- struct evutil_addrinfo hints;
- char strport[NI_MAXSERV];
- int ai_result;
+ struct evutil_addrinfo hints;
+ char strport[NI_MAXSERV];
+ int ai_result;
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = AF_UNSPEC;
- hints.ai_socktype = SOCK_STREAM;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
/* turn NULL hostname into INADDR_ANY, and skip looking up any address
* types we don't have an interface to connect to. */
- hints.ai_flags = EVUTIL_AI_PASSIVE|EVUTIL_AI_ADDRCONFIG;
- evutil_snprintf(strport, sizeof(strport), "%d", port);
- if ((ai_result = evutil_getaddrinfo(address, strport, &hints, &ai))
+ hints.ai_flags = EVUTIL_AI_PASSIVE|EVUTIL_AI_ADDRCONFIG;
+ evutil_snprintf(strport, sizeof(strport), "%d", port);
+ if ((ai_result = evutil_getaddrinfo(address, strport, &hints, &ai))
!= 0) {
- if (ai_result == EVUTIL_EAI_SYSTEM)
- event_warn("getaddrinfo");
- else
- event_warnx("getaddrinfo: %s",
+ if (ai_result == EVUTIL_EAI_SYSTEM)
+ event_warn("getaddrinfo");
+ else
+ event_warnx("getaddrinfo: %s",
evutil_gai_strerror(ai_result));
return (NULL);
- }
+ }
return (ai);
}
Allocate storage for a new evbuffer.
@return a pointer to a newly allocated evbuffer struct, or NULL if an error
- occurred
+ occurred
*/
struct evbuffer *evbuffer_new(void);
@param buffer the evbuffer to be written and drained
@param fd the file descriptor to be written to
@param howmuch the largest allowable number of bytes to write, or -1
- to write as many bytes as we can.
+ to write as many bytes as we can.
@return the number of bytes written, or -1 if an error occurred
@see evbuffer_read()
*/
/** Structure passed to an evbuffer callback */
struct evbuffer_cb_info {
- /** The size of */
- size_t orig_size;
- size_t n_added;
- size_t n_deleted;
+	/** The size of the buffer before the callbacks were last invoked. */
+ size_t orig_size;
+ size_t n_added;
+ size_t n_deleted;
};
/** Function to peek at data inside an evbuffer without removing it or
@param buffer the evbuffer to be monitored
@param cb the callback function to invoke when the evbuffer is modified,
- or NULL to remove all callbacks.
+ or NULL to remove all callbacks.
@param cbarg an argument to be provided to the callback function
@return a handle to the callback on success, or NULL on failure.
*/
@param buf the evbuffer to make contiguous
@param size the number of bytes to make contiguous, or -1 to make the
- entire buffer contiguous.
+ entire buffer contiguous.
@return a pointer to the contiguous memory array
*/
@param buffer the evbuffer to be monitored
@param cb the callback function to invoke when the evbuffer is modified,
- or NULL to remove all callbacks.
+ or NULL to remove all callbacks.
@param cbarg an argument to be provided to the callback function
*/
void evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg);
circumstances.
Libevent provides an abstraction on top of the regular event callbacks.
- This abstraction is called a buffered event. A buffered event provides
+ This abstraction is called a buffered event. A buffered event provides
input and output buffers that get filled and drained automatically. The
user of a buffered event no longer deals directly with the I/O, but
instead is reading from input and writing to output buffers.
It is safe to set the fd to -1, so long as you later
set it with bufferevent_setfd or bufferevent_socket_connect().
@return a pointer to a newly allocated bufferevent struct, or NULL if an
- error occurred
+ error occurred
@see bufferevent_free()
*/
struct bufferevent *bufferevent_socket_new(struct event_base *base, evutil_socket_t fd, int options);
Recognized hostname formats are:
- www.example.com (hostname)
- 1.2.3.4 (ipv4address)
- ::1 (ipv6address)
- [::1] ([ipv6address])
+ www.example.com (hostname)
+ 1.2.3.4 (ipv4address)
+ ::1 (ipv6address)
+ [::1] ([ipv6address])
Performance note: If you do not provide an evdns_base, this function
- may block while it waits for a DNS response. This is probably not
+ may block while it waits for a DNS response. This is probably not
what you want.
*/
int bufferevent_socket_connect_hostname(struct bufferevent *b,
@param bufev the bufferevent object for which to change callbacks
@param readcb callback to invoke when there is data to be read, or NULL if
- no callback is desired
+ no callback is desired
@param writecb callback to invoke when the file descriptor is ready for
- writing, or NULL if no callback is desired
+ writing, or NULL if no callback is desired
@param eventcb callback to invoke when there is an event on the file
- descriptor
+ descriptor
@param cbarg an argument that will be supplied to each of the callbacks
- (readcb, writecb, and errorcb)
+ (readcb, writecb, and errorcb)
@see bufferevent_new()
*/
void bufferevent_setcb(struct bufferevent *bufev,
/**
- Write data from an evbuffer to a bufferevent buffer. The evbuffer is
+ Write data from an evbuffer to a bufferevent buffer. The evbuffer is
being drained as a result.
@param bufev the bufferevent to be written to
size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size);
/**
- Read data from a bufferevent buffer into an evbuffer. This avoids
+ Read data from a bufferevent buffer into an evbuffer. This avoids
memory copies.
@param bufev the bufferevent to be read from
Sets the watermarks for read and write events.
On input, a bufferevent does not invoke the user read callback unless
- there is at least low watermark data in the buffer. If the read buffer
+ there is at least low watermark data in the buffer. If the read buffer
is beyond the high watermark, the bufferevent stops reading from the network.
On output, the user write callback is invoked whenever the buffered data
@param write_rate The maximum number of bytes to write per tick on
average.
@param write_burst The maximum number of bytes to write in any single tick.
- @param tick_len The length of a single tick. Defaults to one second.
+ @param tick_len The length of a single tick. Defaults to one second.
Any fractions of a millisecond are ignored.
Note that all rate-limits hare are currently best-effort: future versions
/**
Subtract a number of bytes from a bufferevent's read or write bucket.
The decrement value can be negative, if you want to manually refill
- the bucket. If the change puts the bucket above or below zero, the
+ the bucket. If the change puts the bucket above or below zero, the
bufferevent will resume or suspend reading writing as appropriate.
These functions make no change in the buckets for the bufferevent's
group, if any.
/**
Subtract a number of bytes from a bufferevent rate-limiting group's
read or write bucket. The decrement value can be negative, if you
- want to manually refill the bucket. If the change puts the bucket
+ want to manually refill the bucket. If the change puts the bucket
above or below zero, the bufferevents in the group will resume or
suspend reading writing as appropriate.
enabling the bufferevent for the first time.
@param fd the file descriptor from which data is read and written to.
- This file descriptor is not allowed to be a pipe(2).
+ This file descriptor is not allowed to be a pipe(2).
@param readcb callback to invoke when there is data to be read, or NULL if
- no callback is desired
+ no callback is desired
@param writecb callback to invoke when the file descriptor is ready for
- writing, or NULL if no callback is desired
+ writing, or NULL if no callback is desired
@param errorcb callback to invoke when there is an error on the file
- descriptor
+ descriptor
@param cbarg an argument that will be supplied to each of the callbacks
- (readcb, writecb, and errorcb)
+ (readcb, writecb, and errorcb)
@return a pointer to a newly allocated bufferevent struct, or NULL if an
- error occurred
+ error occurred
@see bufferevent_base_set(), bufferevent_free()
*/
struct bufferevent *bufferevent_new(evutil_socket_t fd,
@see evdns_base_nameserver_ip_add()
*/
int evdns_base_nameserver_add(struct evdns_base *base,
- unsigned long int address);
+ unsigned long int address);
/**
Get the number of configured nameservers.
@see evdns_base_nameserver_add()
*/
int evdns_base_nameserver_ip_add(struct evdns_base *base,
- const char *ip_as_string);
+ const char *ip_as_string);
struct evdns_request;
@param base the evdns_base to which to apply this operation
@param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
- DNS_OPTIONS_HOSTSFILE|DNS_OPTIONS_ALL
+ DNS_OPTIONS_HOSTSFILE|DNS_OPTIONS_ALL
@param filename the path to the resolv.conf file
@return 0 if successful, or various positive error codes if an error
- occurred (see above)
+ occurred (see above)
@see resolv.conf(3), evdns_config_windows_nameservers()
*/
int evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename);
#define evtimer_initialized(ev) _event_initialized((ev), 0)
#define evsignal_add(ev, tv) event_add((ev), (tv))
-#define evsignal_assign(ev, b, x, cb, arg) \
+#define evsignal_assign(ev, b, x, cb, arg) \
event_assign((ev), (b), (x), EV_SIGNAL|EV_PERSIST, cb, (arg))
-#define evsignal_new(b, x, cb, arg) \
+#define evsignal_new(b, x, cb, arg) \
event_new((b), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
#define evsignal_del(ev) event_del(ev)
#define evsignal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv))
@deprecated This macro is deprecated because its naming is inconsistent.
The recommend macro is evsignal_set().
*/
-#define signal_set(ev, x, cb, arg) \
+#define signal_set(ev, x, cb, arg) \
event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommend macro is evsignal_assign().
*/
-#define signal_assign(ev, b, x, cb, arg) \
+#define signal_assign(ev, b, x, cb, arg) \
event_assign((ev), (b), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
/**
@deprecated This macro is deprecated because its naming is inconsistent.
@param overlapped The struct event_overlapped to initialize
@param cb The callback that should be invoked once the IO operation has
- finished.
+ finished.
*/
void event_overlapped_init(struct event_overlapped *, iocp_callback cb);
if (res == -1) {
if (errno != EINTR) {
- event_warn("kevent");
+ event_warn("kevent");
return (-1);
}
#include "log-internal.h"
static void _warn_helper(int severity, const char *errstr, const char *fmt,
- va_list ap);
+ va_list ap);
static void event_log(int severity, const char *msg);
static event_fatal_cb fatal_fn = NULL;
typedef struct min_heap
{
- struct event** p;
- unsigned n, a;
+ struct event** p;
+ unsigned n, a;
} min_heap_t;
-static inline void min_heap_ctor(min_heap_t* s);
-static inline void min_heap_dtor(min_heap_t* s);
-static inline void min_heap_elem_init(struct event* e);
-static inline int min_heap_elt_is_top(const struct event *e);
-static inline int min_heap_elem_greater(struct event *a, struct event *b);
-static inline int min_heap_empty(min_heap_t* s);
-static inline unsigned min_heap_size(min_heap_t* s);
+static inline void min_heap_ctor(min_heap_t* s);
+static inline void min_heap_dtor(min_heap_t* s);
+static inline void min_heap_elem_init(struct event* e);
+static inline int min_heap_elt_is_top(const struct event *e);
+static inline int min_heap_elem_greater(struct event *a, struct event *b);
+static inline int min_heap_empty(min_heap_t* s);
+static inline unsigned min_heap_size(min_heap_t* s);
static inline struct event* min_heap_top(min_heap_t* s);
-static inline int min_heap_reserve(min_heap_t* s, unsigned n);
-static inline int min_heap_push(min_heap_t* s, struct event* e);
+static inline int min_heap_reserve(min_heap_t* s, unsigned n);
+static inline int min_heap_push(min_heap_t* s, struct event* e);
static inline struct event* min_heap_pop(min_heap_t* s);
-static inline int min_heap_erase(min_heap_t* s, struct event* e);
-static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
-static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline int min_heap_erase(min_heap_t* s, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
int min_heap_elem_greater(struct event *a, struct event *b)
{
- return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
+ return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
}
void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
int min_heap_push(min_heap_t* s, struct event* e)
{
- if(min_heap_reserve(s, s->n + 1))
- return -1;
- min_heap_shift_up_(s, s->n++, e);
- return 0;
+ if (min_heap_reserve(s, s->n + 1))
+ return -1;
+ min_heap_shift_up_(s, s->n++, e);
+ return 0;
}
struct event* min_heap_pop(min_heap_t* s)
{
- if(s->n)
- {
- struct event* e = *s->p;
- min_heap_shift_down_(s, 0u, s->p[--s->n]);
- e->ev_timeout_pos.min_heap_idx = -1;
- return e;
- }
- return 0;
+ if (s->n)
+ {
+ struct event* e = *s->p;
+ min_heap_shift_down_(s, 0u, s->p[--s->n]);
+ e->ev_timeout_pos.min_heap_idx = -1;
+ return e;
+ }
+ return 0;
}
int min_heap_elt_is_top(const struct event *e)
int min_heap_erase(min_heap_t* s, struct event* e)
{
- if(((unsigned int)-1) != e->ev_timeout_pos.min_heap_idx)
- {
- struct event *last = s->p[--s->n];
- unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
- /* we replace e with the last element in the heap. We might need to
- shift it upward if it is less than its parent, or downward if it is
- greater than one or both its children. Since the children are known
- to be less than the parent, it can't need to shift both up and
- down. */
- if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
- min_heap_shift_up_(s, e->ev_timeout_pos.min_heap_idx, last);
- else
- min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
- e->ev_timeout_pos.min_heap_idx = -1;
- return 0;
- }
- return -1;
+ if (((unsigned int)-1) != e->ev_timeout_pos.min_heap_idx)
+ {
+ struct event *last = s->p[--s->n];
+ unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ /* we replace e with the last element in the heap. We might need to
+ shift it upward if it is less than its parent, or downward if it is
+ greater than one or both its children. Since the children are known
+ to be less than the parent, it can't need to shift both up and
+ down. */
+ if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
+ min_heap_shift_up_(s, e->ev_timeout_pos.min_heap_idx, last);
+ else
+ min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
+ e->ev_timeout_pos.min_heap_idx = -1;
+ return 0;
+ }
+ return -1;
}
int min_heap_reserve(min_heap_t* s, unsigned n)
{
- if(s->a < n)
- {
- struct event** p;
- unsigned a = s->a ? s->a * 2 : 8;
- if(a < n)
- a = n;
- if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
- return -1;
- s->p = p;
- s->a = a;
- }
- return 0;
+ if(s->a < n)
+ {
+ struct event** p;
+ unsigned a = s->a ? s->a * 2 : 8;
+ if(a < n)
+ a = n;
+ if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
+ return -1;
+ s->p = p;
+ s->a = a;
+ }
+ return 0;
}
void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
unsigned parent = (hole_index - 1) / 2;
while(hole_index && min_heap_elem_greater(s->p[parent], e))
{
- (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
- hole_index = parent;
- parent = (hole_index - 1) / 2;
+ (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
}
(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}
unsigned min_child = 2 * (hole_index + 1);
while(min_child <= s->n)
{
- min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
- if(!(min_heap_elem_greater(e, s->p[min_child])))
- break;
- (s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index;
- hole_index = min_child;
- min_child = 2 * (hole_index + 1);
+ min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
+ if(!(min_heap_elem_greater(e, s->p[min_child])))
+ break;
+ (s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = min_child;
+ min_child = 2 * (hole_index + 1);
}
min_heap_shift_up_(s, hole_index, e);
}
struct pollop {
int event_count; /* Highest number alloc */
- int nfds; /* Highest number used */
- int realloc_copy; /* True iff we must realloc
+ int nfds; /* Highest number used */
+ int realloc_copy; /* True iff we must realloc
* event_set_copy */
struct pollfd *event_set;
struct pollfd *event_set_copy;
poll_del,
poll_dispatch,
poll_dealloc,
- 0, /* doesn't need_reinit */
+ 0, /* doesn't need_reinit */
EV_FEATURE_FDS,
sizeof(struct pollidx),
};
if (res == -1) {
if (errno != EINTR) {
- event_warn("poll");
+ event_warn("poll");
return (-1);
}
#ifdef WIN32
HANDLE socket;
/* Open a file. */
- socket = CreateFileA("test.txt", /* open File */
- GENERIC_READ, /* open for reading */
- 0, /* do not share */
- NULL, /* no security */
- OPEN_EXISTING, /* existing file only */
- FILE_ATTRIBUTE_NORMAL, /* normal file */
- NULL); /* no attr. template */
+ socket = CreateFileA("test.txt", /* open File */
+ GENERIC_READ, /* open for reading */
+ 0, /* do not share */
+ NULL, /* no security */
+ OPEN_EXISTING, /* existing file only */
+ FILE_ATTRIBUTE_NORMAL, /* normal file */
+ NULL); /* no attr. template */
if(socket == INVALID_HANDLE_VALUE)
return 1;
#include "evmap-internal.h"
#ifndef howmany
-#define howmany(x, y) (((x)+((y)-1))/(y))
+#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
#ifndef _EVENT_HAVE_FD_MASK
static char signals[1];
ev_ssize_t n;
- (void)arg; /* Suppress "unused variable" warning. */
+ (void)arg; /* Suppress "unused variable" warning. */
n = recv(fd, signals, sizeof(signals), 0);
if (n == -1)
base->sig.evsig_caught = 0;
memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
- evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);
+ evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);
event_assign(&base->sig.ev_signal, base, base->sig.ev_signal_pair[1],
EV_READ | EV_PERSIST, evsig_cb, &base->sig.ev_signal);
tt_fail_perror("read (callback)");
} else {
switch (arg->callcount++) {
- case 0: /* first call: expect to read data; cycle */
+ case 0: /* first call: expect to read data; cycle */
if (len > 0)
return;
tt_fail_msg("EOF before data read");
break;
- case 1: /* second call: expect EOF; stop */
+ case 1: /* second call: expect EOF; stop */
if (len > 0)
tt_fail_msg("not all data read on first cycle");
break;
static int
setup_test(const char *name)
{
- if (in_legacy_test_wrapper)
- return 0;
+ if (in_legacy_test_wrapper)
+ return 0;
fprintf(stdout, "%s", name);
exit(1);
}
- if (evutil_make_socket_nonblocking(pair[0]) == -1)
+ if (evutil_make_socket_nonblocking(pair[0]) == -1)
fprintf(stderr, "fcntl(O_NONBLOCK)");
- if (evutil_make_socket_nonblocking(pair[1]) == -1)
+ if (evutil_make_socket_nonblocking(pair[1]) == -1)
fprintf(stderr, "fcntl(O_NONBLOCK)");
test_ok = 0;
static int
cleanup_test(void)
{
- if (in_legacy_test_wrapper)
- return 0;
+ if (in_legacy_test_wrapper)
+ return 0;
#ifndef WIN32
close(pair[0]);
fprintf(stdout, "FAILED\n");
exit(1);
}
- test_ok = 0;
+ test_ok = 0;
return (0);
}
evsignal_add(&ev, NULL);
evsignal_del(&ev);
event_base_free(base);
- /* If we got here without asserting, we're fine. */
- test_ok = 1;
+ /* If we got here without asserting, we're fine. */
+ test_ok = 1;
cleanup_test();
}
/*
* make two bases to catch signals, use both of them. this only works
- * for event mechanisms that use our signal pipe trick. kqueue handles
+ * for event mechanisms that use our signal pipe trick. kqueue handles
* signals internally, and all interested kqueues get all the signals.
*/
static void
{
struct event ev1, ev2;
struct event_base *base1, *base2;
- int is_kqueue;
+ int is_kqueue;
test_ok = 0;
printf("Signal switchbase: ");
base1 = event_init();
base2 = event_init();
- is_kqueue = !strcmp(event_get_method(),"kqueue");
+ is_kqueue = !strcmp(event_get_method(),"kqueue");
evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
if (event_base_set(base1, &ev1) ||
/* can handle signal before loop is called */
raise(SIGUSR1);
event_base_loop(base2, EVLOOP_NONBLOCK);
- if (is_kqueue) {
- if (!test_ok)
- goto end;
- test_ok = 0;
- }
+ if (is_kqueue) {
+ if (!test_ok)
+ goto end;
+ test_ok = 0;
+ }
event_base_loop(base1, EVLOOP_NONBLOCK);
if (test_ok && !is_kqueue) {
test_ok = 0;
event_assign(&ev1, base1, data->pair[1], EV_READ,
dummy_read_cb, NULL);
event_add(&ev1, NULL);
- event_base_free(base1); /* should not crash */
+ event_base_free(base1); /* should not crash */
} else {
tt_fail_msg("failed to create event_base for test");
}
static void
test_priorities(void)
{
- test_priorities_impl(1);
- if (test_ok)
- test_priorities_impl(2);
- if (test_ok)
- test_priorities_impl(3);
+ test_priorities_impl(1);
+ if (test_ok)
+ test_priorities_impl(2);
+ if (test_ok)
+ test_priorities_impl(3);
}
int oldlen, newlen;
oldlen = EVBUFFER_LENGTH(tmp);
evtag_encode_int(tmp, integers[i]);
- newlen = EVBUFFER_LENGTH(tmp);
+ newlen = EVBUFFER_LENGTH(tmp);
TT_BLATHER(("encoded 0x%08x with %d bytes",
- (unsigned)integers[i], newlen - oldlen));
+ (unsigned)integers[i], newlen - oldlen));
big_int = integers[i];
big_int *= 1000000000; /* 1 billion */
evtag_encode_int64(tmp, big_int);
}
for (i = 0; i < TEST_MAX_INT; i++) {
- tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
- tt_uint_op(integer, ==, integers[i]);
+ tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
+ tt_uint_op(integer, ==, integers[i]);
tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
tt_assert((big_int / 1000000000) == integers[i]);
}
((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
- tt_abort_msg("evtag_unmarshal_timeval should have failed");
+ tt_abort_msg("evtag_unmarshal_timeval should have failed");
}
end:
oldlen = EVBUFFER_LENGTH(tmp);
evtag_encode_tag(tmp, integers[i]);
newlen = EVBUFFER_LENGTH(tmp);
- TT_BLATHER(("encoded 0x%08x with %d bytes",
- (unsigned)integers[i], newlen - oldlen));
+ TT_BLATHER(("encoded 0x%08x with %d bytes",
+ (unsigned)integers[i], newlen - oldlen));
}
for (i = 0; i < TEST_MAX_INT; i++) {
- tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
- tt_uint_op(integer, ==, integers[i]);
+ tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
+ tt_uint_op(integer, ==, integers[i]);
}
- tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
+ tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
end:
evbuffer_free(tmp);
const char *backend;
int n_methods = 0;
- tt_assert(methods);
+ tt_assert(methods);
backend = methods[0];
while (*methods != NULL) {
}
end:
- if (base)
- event_base_free(base);
- if (cfg)
- event_config_free(cfg);
+ if (base)
+ event_base_free(base);
+ if (cfg)
+ event_config_free(cfg);
}
static void
}
struct testcase_t main_testcases[] = {
- /* Some converted-over tests */
- { "methods", test_methods, TT_FORK, NULL, NULL },
+ /* Some converted-over tests */
+ { "methods", test_methods, TT_FORK, NULL, NULL },
{ "version", test_version, 0, NULL, NULL },
BASIC(base_features, TT_FORK|TT_NO_LOGS),
{ "base_environ", test_base_environ, TT_FORK, NULL, NULL },
BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
- /* These are still using the old API */
- LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
- LEGACY(priorities, TT_FORK|TT_NEED_BASE),
+ /* These are still using the old API */
+ LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
+ LEGACY(priorities, TT_FORK|TT_NEED_BASE),
{ "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
&basic_setup, NULL },
- /* These legacy tests may not all need all of these flags. */
- LEGACY(simpleread, TT_ISOLATED),
- LEGACY(simpleread_multiple, TT_ISOLATED),
- LEGACY(simplewrite, TT_ISOLATED),
- LEGACY(multiple, TT_ISOLATED),
- LEGACY(persistent, TT_ISOLATED),
- LEGACY(combined, TT_ISOLATED),
- LEGACY(simpletimeout, TT_ISOLATED),
- LEGACY(loopbreak, TT_ISOLATED),
- LEGACY(loopexit, TT_ISOLATED),
+ /* These legacy tests may not all need all of these flags. */
+ LEGACY(simpleread, TT_ISOLATED),
+ LEGACY(simpleread_multiple, TT_ISOLATED),
+ LEGACY(simplewrite, TT_ISOLATED),
+ LEGACY(multiple, TT_ISOLATED),
+ LEGACY(persistent, TT_ISOLATED),
+ LEGACY(combined, TT_ISOLATED),
+ LEGACY(simpletimeout, TT_ISOLATED),
+ LEGACY(loopbreak, TT_ISOLATED),
+ LEGACY(loopexit, TT_ISOLATED),
LEGACY(loopexit_multiple, TT_ISOLATED),
LEGACY(nonpersist_readd, TT_ISOLATED),
LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
BASIC(many_events, TT_ISOLATED),
#ifndef WIN32
- LEGACY(fork, TT_ISOLATED),
+ LEGACY(fork, TT_ISOLATED),
#endif
- END_OF_TESTCASES
+ END_OF_TESTCASES
};
struct testcase_t evtag_testcases[] = {
LEGACY(simplesignal, TT_ISOLATED),
LEGACY(multiplesignal, TT_ISOLATED),
LEGACY(immediatesignal, TT_ISOLATED),
- LEGACY(signal_dealloc, TT_ISOLATED),
+ LEGACY(signal_dealloc, TT_ISOLATED),
LEGACY(signal_pipeloss, TT_ISOLATED),
LEGACY(signal_switchbase, TT_ISOLATED),
LEGACY(signal_restore, TT_ISOLATED),
LEGACY(signal_assert, TT_ISOLATED),
LEGACY(signal_while_processing, TT_ISOLATED),
#endif
- END_OF_TESTCASES
+ END_OF_TESTCASES
};
void run_legacy_test_fn(void *ptr);
/* A couple of flags that basic/legacy_setup can support. */
-#define TT_NEED_SOCKETPAIR TT_FIRST_USER_FLAG
-#define TT_NEED_BASE (TT_FIRST_USER_FLAG<<1)
-#define TT_NEED_DNS (TT_FIRST_USER_FLAG<<2)
-#define TT_LEGACY (TT_FIRST_USER_FLAG<<3)
-#define TT_NEED_THREADS (TT_FIRST_USER_FLAG<<4)
-#define TT_NO_LOGS (TT_FIRST_USER_FLAG<<5)
-#define TT_ENABLE_IOCP_FLAG (TT_FIRST_USER_FLAG<<6)
-#define TT_ENABLE_IOCP (TT_ENABLE_IOCP_FLAG|TT_NEED_THREADS)
+#define TT_NEED_SOCKETPAIR TT_FIRST_USER_FLAG
+#define TT_NEED_BASE (TT_FIRST_USER_FLAG<<1)
+#define TT_NEED_DNS (TT_FIRST_USER_FLAG<<2)
+#define TT_LEGACY (TT_FIRST_USER_FLAG<<3)
+#define TT_NEED_THREADS (TT_FIRST_USER_FLAG<<4)
+#define TT_NO_LOGS (TT_FIRST_USER_FLAG<<5)
+#define TT_ENABLE_IOCP_FLAG (TT_FIRST_USER_FLAG<<6)
+#define TT_ENABLE_IOCP (TT_ENABLE_IOCP_FLAG|TT_NEED_THREADS)
/* All the flags that a legacy test needs. */
#define TT_ISOLATED TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE
void *arg)
{
- size_t old_len = cbinfo->orig_size;
- size_t new_len = old_len + cbinfo->n_added - cbinfo->n_deleted;
+ size_t old_len = cbinfo->orig_size;
+ size_t new_len = old_len + cbinfo->n_added - cbinfo->n_deleted;
struct evbuffer *out = arg;
evbuffer_add_printf(out, "%lu->%lu; ", (unsigned long)old_len,
(unsigned long)new_len);
}
static void
self_draining_callback(struct evbuffer *evbuffer, size_t old_len,
- size_t new_len, void *arg)
+ size_t new_len, void *arg)
{
if (new_len > old_len)
evbuffer_drain(evbuffer, new_len);
if (evbuffer_get_length(evbuf) == 8333) {
test_ok++;
- }
+ }
evbuffer_free(evbuf);
}
{
if (evbuffer_get_length(bev->output) == 0) {
test_ok++;
- }
+ }
}
static void
static void
wm_writecb(struct bufferevent *bev, void *arg)
{
- assert(evbuffer_get_length(bev->output) <= 100);
+ assert(evbuffer_get_length(bev->output) <= 100);
if (evbuffer_get_length(bev->output) == 0) {
- evbuffer_drain(bev->output, evbuffer_get_length(bev->output));
+ evbuffer_drain(bev->output, evbuffer_get_length(bev->output));
test_ok++;
- }
+ }
}
static void
/* limit the reading on the receiving bufferevent */
bufferevent_setwatermark(bev2, EV_READ, 10, 20);
- /* Tell the sending bufferevent not to notify us till it's down to
- 100 bytes. */
- bufferevent_setwatermark(bev1, EV_WRITE, 100, 2000);
+ /* Tell the sending bufferevent not to notify us till it's down to
+ 100 bytes. */
+ bufferevent_setwatermark(bev1, EV_WRITE, 100, 2000);
bufferevent_write(bev1, buffer, sizeof(buffer));
tt_int_op(test_ok, ==, 2);
- /* The write callback drained all the data from outbuf, so we
- * should have removed the write event... */
- tt_assert(!event_pending(&bev2->ev_write, EV_WRITE, NULL));
+ /* The write callback drained all the data from outbuf, so we
+ * should have removed the write event... */
+ tt_assert(!event_pending(&bev2->ev_write, EV_WRITE, NULL));
end:
bufferevent_free(bev1);
char buffer[8333];
int i;
- test_ok = 0;
+ test_ok = 0;
if (use_pair) {
struct bufferevent *pair[2];
#ifdef WIN32
if (!strcmp((char*)data->setup_data, "unset_connectex")) {
struct win32_extension_fns *ext =
- (struct win32_extension_fns *)
+ (struct win32_extension_fns *)
event_get_win32_extension_fns();
ext->ConnectEx = NULL;
}
struct testcase_t bufferevent_testcases[] = {
- LEGACY(bufferevent, TT_ISOLATED),
- LEGACY(bufferevent_pair, TT_ISOLATED),
- LEGACY(bufferevent_watermarks, TT_ISOLATED),
- LEGACY(bufferevent_pair_watermarks, TT_ISOLATED),
- LEGACY(bufferevent_filters, TT_ISOLATED),
- LEGACY(bufferevent_pair_filters, TT_ISOLATED),
+ LEGACY(bufferevent, TT_ISOLATED),
+ LEGACY(bufferevent_pair, TT_ISOLATED),
+ LEGACY(bufferevent_watermarks, TT_ISOLATED),
+ LEGACY(bufferevent_pair_watermarks, TT_ISOLATED),
+ LEGACY(bufferevent_filters, TT_ISOLATED),
+ LEGACY(bufferevent_pair_filters, TT_ISOLATED),
{ "bufferevent_connect", test_bufferevent_connect, TT_FORK|TT_NEED_BASE,
&basic_setup, (void*)"" },
{ "bufferevent_connect_defer", test_bufferevent_connect,
{ "bufferevent_connect_fail", test_bufferevent_connect_fail,
TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
#ifdef _EVENT_HAVE_LIBZ
- LEGACY(bufferevent_zlib, TT_ISOLATED),
+ LEGACY(bufferevent_zlib, TT_ISOLATED),
#else
- { "bufferevent_zlib", NULL, TT_SKIP, NULL, NULL },
+ { "bufferevent_zlib", NULL, TT_SKIP, NULL, NULL },
#endif
- END_OF_TESTCASES,
+ END_OF_TESTCASES,
};
struct testcase_t bufferevent_iocp_testcases[] = {
- LEGACY(bufferevent, TT_ISOLATED|TT_ENABLE_IOCP),
- LEGACY(bufferevent_watermarks, TT_ISOLATED|TT_ENABLE_IOCP),
- LEGACY(bufferevent_filters, TT_ISOLATED|TT_ENABLE_IOCP),
+ LEGACY(bufferevent, TT_ISOLATED|TT_ENABLE_IOCP),
+ LEGACY(bufferevent_watermarks, TT_ISOLATED|TT_ENABLE_IOCP),
+ LEGACY(bufferevent_filters, TT_ISOLATED|TT_ENABLE_IOCP),
{ "bufferevent_connect", test_bufferevent_connect,
TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup, (void*)"" },
{ "bufferevent_connect_defer", test_bufferevent_connect,
TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP, &basic_setup,
(void*)"unset_connectex" },
- END_OF_TESTCASES,
+ END_OF_TESTCASES,
};
goto out;
}
- TT_BLATHER(("type: %d, count: %d, ttl: %d: ", type, count, ttl));
+ TT_BLATHER(("type: %d, count: %d, ttl: %d: ", type, count, ttl));
switch (type) {
case DNS_IPv6_AAAA: {
if (ttl < 0)
goto out;
for (i = 0; i < count; ++i)
- TT_BLATHER(("%s ", inet_ntoa(in_addrs[i])));
+ TT_BLATHER(("%s ", inet_ntoa(in_addrs[i])));
break;
}
case DNS_PTR:
evdns_resolve_ipv4("www.monkey.org", 0, dns_gethostbyname_cb, NULL);
event_dispatch();
- tt_int_op(dns_ok, ==, DNS_IPv4_A);
- test_ok = dns_ok;
+ tt_int_op(dns_ok, ==, DNS_IPv4_A);
+ test_ok = dns_ok;
end:
- ;
+ ;
}
static void
evdns_resolve_ipv6("www.ietf.org", 0, dns_gethostbyname_cb, NULL);
event_dispatch();
- if (!dns_ok && dns_err == DNS_ERR_TIMEOUT) {
- tt_skip();
- }
+ if (!dns_ok && dns_err == DNS_ERR_TIMEOUT) {
+ tt_skip();
+ }
- tt_int_op(dns_ok, ==, DNS_IPv6_AAAA);
- test_ok = 1;
+ tt_int_op(dns_ok, ==, DNS_IPv6_AAAA);
+ test_ok = 1;
end:
- ;
+ ;
}
static void
evdns_resolve_reverse(&in, 0, dns_gethostbyname_cb, NULL);
event_dispatch();
- tt_int_op(dns_ok, ==, DNS_PTR);
- test_ok = dns_ok;
+ tt_int_op(dns_ok, ==, DNS_PTR);
+ test_ok = dns_ok;
end:
- ;
+ ;
}
static void
struct evdns_base *dns = evdns_base_new(base, 1/* init name servers */);
struct evdns_request *req = NULL;
- tt_assert(base);
- tt_assert(dns);
+ tt_assert(base);
+ tt_assert(dns);
in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */
dns_ok = 0;
req = evdns_base_resolve_reverse(
dns, &in, 0, dns_gethostbyname_cb, base);
- tt_assert(req);
+ tt_assert(req);
event_base_dispatch(base);
- tt_int_op(dns_ok, ==, DNS_PTR);
+ tt_int_op(dns_ok, ==, DNS_PTR);
end:
- if (dns)
- evdns_base_free(dns, 0);
- if (base)
- event_base_free(base);
+ if (dns)
+ evdns_base_free(dns, 0);
+ if (base)
+ event_base_free(base);
}
static int n_server_responses = 0;
"ZZ-INET6.EXAMPLE.COM", 54322);
if (r<0)
dns_ok = 0;
- } else if (qtype == EVDNS_TYPE_A &&
+ } else if (qtype == EVDNS_TYPE_A &&
qclass == EVDNS_CLASS_INET &&
!evutil_ascii_strcasecmp(qname, "drop.example.com")) {
if (evdns_server_request_drop(req)<0)
static void
dns_server(void)
{
- evutil_socket_t sock=-1;
+ evutil_socket_t sock=-1;
struct sockaddr_in my_addr;
struct evdns_server_port *port=NULL;
struct in_addr resolve_addr;
tt_int_op(evdns_base_count_nameservers(base), ==, 1);
/* Now configure a nameserver port. */
sock = socket(AF_INET, SOCK_DGRAM, 0);
- if (sock<0) {
- tt_abort_perror("socket");
- }
+ if (sock<0) {
+ tt_abort_perror("socket");
+ }
- evutil_make_socket_nonblocking(sock);
+ evutil_make_socket_nonblocking(sock);
memset(&my_addr, 0, sizeof(my_addr));
my_addr.sin_family = AF_INET;
dns_server_gethostbyname_cb, NULL);
resolve_addr.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
evdns_base_resolve_reverse(base, &resolve_addr, 0,
- dns_server_gethostbyname_cb, NULL);
+ dns_server_gethostbyname_cb, NULL);
memcpy(resolve_addr6.s6_addr,
"\xff\xf0\x00\x00\x00\x00\xaa\xaa"
"\x11\x11\x00\x00\x00\x00\xef\xef", 16);
evdns_base_resolve_reverse_ipv6(base, &resolve_addr6, 0,
- dns_server_gethostbyname_cb, (void*)6);
+ dns_server_gethostbyname_cb, (void*)6);
req = evdns_base_resolve_ipv4(base,
"drop.example.com", DNS_QUERY_NO_SEARCH,
event_dispatch();
tt_assert(dns_got_cancel);
- test_ok = dns_ok;
+ test_ok = dns_ok;
end:
- if (port)
- evdns_close_server_port(port);
- if (sock >= 0)
- EVUTIL_CLOSESOCKET(sock);
+ if (port)
+ evdns_close_server_port(port);
+ if (sock >= 0)
+ EVUTIL_CLOSESOCKET(sock);
if (base)
evdns_base_free(base, 0);
}
evutil_snprintf(buf, sizeof(buf), "127.0.0.1:%d", dns_port);
evdns_base_nameserver_ip_add(dns, buf);
- /* Now, finally, at long last, launch the bufferevents. One should do
+ /* Now, finally, at long last, launch the bufferevents. One should do
* a failing lookup IP, one should do a successful lookup by IP,
* and one should do a successful lookup by hostname. */
be1 = bufferevent_socket_new(data->base, -1, BEV_OPT_CLOSE_ON_FREE);
if (server_fd>=0)
EVUTIL_CLOSESOCKET(server_fd);
if (port)
- evdns_close_server_port(port);
+ evdns_close_server_port(port);
if (dns)
evdns_base_free(dns, 0);
if (be1)
tt_assert(r);
/* 3: PF_INET request for v4assert.example.com should not generate a
- * v6 request. The server will fail the test if it does. */
+ * v6 request. The server will fail the test if it does. */
hints.ai_family = PF_INET;
r = evdns_getaddrinfo(dns_base, "v4assert.example.com", "8003",
&hints, gai_cb, &a_out[3]);
tt_assert(r);
/* 4: PF_INET6 request for v6assert.example.com should not generate a
- * v4 request. The server will fail the test if it does. */
+ * v4 request. The server will fail the test if it does. */
hints.ai_family = PF_INET6;
r = evdns_getaddrinfo(dns_base, "v6assert.example.com", "8004",
&hints, gai_cb, &a_out[4]);
"8008", &hints, gai_cb, &a_out[8]);
tt_assert(r);
- /* 9: AI_ADDRCONFIG should at least not crash. Can't test it more
+ /* 9: AI_ADDRCONFIG should at least not crash. Can't test it more
* without knowing what kind of internet we have. */
hints.ai_flags |= EVUTIL_AI_ADDRCONFIG;
r = evdns_getaddrinfo(dns_base, "both.example.com",
evutil_freeaddrinfo(a_out[i].ai);
}
if (port)
- evdns_close_server_port(port);
+ evdns_close_server_port(port);
if (dns_base)
evdns_base_free(dns_base, 0);
}
-#define DNS_LEGACY(name, flags) \
+#define DNS_LEGACY(name, flags) \
{ #name, run_legacy_test_fn, flags|TT_LEGACY, &legacy_setup, \
- dns_##name }
+ dns_##name }
struct testcase_t dns_testcases[] = {
- DNS_LEGACY(server, TT_FORK|TT_NEED_BASE),
- DNS_LEGACY(gethostbyname, TT_FORK|TT_NEED_BASE|TT_NEED_DNS),
- DNS_LEGACY(gethostbyname6, TT_FORK|TT_NEED_BASE|TT_NEED_DNS),
- DNS_LEGACY(gethostbyaddr, TT_FORK|TT_NEED_BASE|TT_NEED_DNS),
- { "resolve_reverse", dns_resolve_reverse, TT_FORK, NULL, NULL },
+ DNS_LEGACY(server, TT_FORK|TT_NEED_BASE),
+ DNS_LEGACY(gethostbyname, TT_FORK|TT_NEED_BASE|TT_NEED_DNS),
+ DNS_LEGACY(gethostbyname6, TT_FORK|TT_NEED_BASE|TT_NEED_DNS),
+ DNS_LEGACY(gethostbyaddr, TT_FORK|TT_NEED_BASE|TT_NEED_DNS),
+ { "resolve_reverse", dns_resolve_reverse, TT_FORK, NULL, NULL },
{ "search", dns_search_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
{ "retry", dns_retry_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
{ "reissue", dns_reissue_test, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
{ "getaddrinfo_async", test_getaddrinfo_async,
TT_FORK|TT_NEED_BASE, &basic_setup, (char*)"" },
- END_OF_TESTCASES
+ END_OF_TESTCASES
};
len = recv(fd, &buf, sizeof(buf), 0);
/*printf("%s: %s %d%s\n", __func__, event & EV_ET ? "etread" : "read",
- len, len ? "" : " - means EOF");
+ len, len ? "" : " - means EOF");
*/
called++;
test_edgetriggered(void *et)
{
struct event *ev = NULL;
- struct event_base *base = NULL;
+ struct event_base *base = NULL;
const char *test = "test string";
evutil_socket_t pair[2] = {-1,-1};
- int supports_et;
+ int supports_et;
int success;
if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair) == -1) {
supports_et?"":"not "));
/* Initalize one event */
- ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev);
+ ev = event_new(base, pair[1], EV_READ|EV_ET|EV_PERSIST, read_cb, &ev);
event_add(ev, NULL);
evhttp_connection_free(evcon);
if (http)
evhttp_free(http);
- if (dns_base)
- evdns_base_free(dns_base, 0);
+ if (dns_base)
+ evdns_base_free(dns_base, 0);
regress_clean_dnsserver();
}
http_base_test(void)
{
struct event_base *tmp;
- struct bufferevent *bev;
+ struct bufferevent *bev;
int fd;
const char *http_request;
short port = -1;
req = evhttp_request_new(http_data_length_constraints_test_done, NULL);
tt_assert(req);
evhttp_add_header(req->output_headers, "Host", "somehost");
-
- /* GET /?arg=verylongvalue HTTP/1.1 */
+
+ /* GET /?arg=verylongvalue HTTP/1.1 */
if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, long_str) == -1) {
tt_abort_msg("Couldn't make request");
}
}
event_dispatch();
- test_ok = 1;
+ test_ok = 1;
end:
if (evcon)
evhttp_connection_free(evcon);
evhttp_connection_set_closecb(
evhttp_request_get_connection(req),
terminate_chunked_close_cb, arg);
-
+
state->req = req;
evhttp_send_reply_start(req, HTTP_OK, "OK");
#define HTTP_LEGACY(name) \
{ #name, run_legacy_test_fn, TT_ISOLATED|TT_LEGACY, &legacy_setup, \
- http_##name##_test }
+ http_##name##_test }
struct testcase_t http_testcases[] = {
{ "primitives", http_primitives, 0, NULL, NULL },
{ "randport", regress_pick_a_port, TT_FORK|TT_NEED_BASE,
&basic_setup, NULL},
- END_OF_TESTCASES,
+ END_OF_TESTCASES,
};
struct testcase_t listener_iocp_testcases[] = {
TT_FORK|TT_NEED_BASE|TT_ENABLE_IOCP,
&basic_setup, NULL},
- END_OF_TESTCASES,
+ END_OF_TESTCASES,
};
static void dnslogcb(int w, const char *m)
{
- TT_BLATHER(("%s", m));
+ TT_BLATHER(("%s", m));
}
/* creates a temporary file with the data in it */
}
}
- if (testcase->flags & TT_NEED_DNS) {
- evdns_set_log_fn(dnslogcb);
- if (evdns_init())
- return NULL; /* fast failure */ /*XXX asserts. */
- }
+ if (testcase->flags & TT_NEED_DNS) {
+ evdns_set_log_fn(dnslogcb);
+ if (evdns_init())
+ return NULL; /* fast failure */ /*XXX asserts. */
+ }
if (testcase->flags & TT_NO_LOGS)
event_set_log_callback(ignore_log_cb);
event_set_log_callback(NULL);
if (testcase->flags & TT_NEED_SOCKETPAIR) {
- if (data->pair[0] != -1)
- EVUTIL_CLOSESOCKET(data->pair[0]);
- if (data->pair[1] != -1)
- EVUTIL_CLOSESOCKET(data->pair[1]);
- }
+ if (data->pair[0] != -1)
+ EVUTIL_CLOSESOCKET(data->pair[0]);
+ if (data->pair[1] != -1)
+ EVUTIL_CLOSESOCKET(data->pair[1]);
+ }
- if (testcase->flags & TT_NEED_DNS) {
- evdns_shutdown(0);
- }
+ if (testcase->flags & TT_NEED_DNS) {
+ evdns_shutdown(0);
+ }
- if (testcase->flags & TT_NEED_BASE) {
+ if (testcase->flags & TT_NEED_BASE) {
if (data->base)
event_base_free(data->base);
- }
+ }
free(data);
struct basic_test_data *data = ptr;
test_ok = called = 0;
- in_legacy_test_wrapper = 1;
+ in_legacy_test_wrapper = 1;
data->legacy_test_fn(); /* This part actually calls the test */
- in_legacy_test_wrapper = 0;
+ in_legacy_test_wrapper = 0;
if (!test_ok)
tt_abort_msg("Legacy unit test failed");
#ifdef _EVENT_HAVE_OPENSSL
{ "ssl/", ssl_testcases },
#endif
- END_OF_GROUPS
+ END_OF_GROUPS
};
int
evthread_enable_lock_debuging();
#endif
- if (tinytest_main(argc,argv,testgroups))
- return 1;
+ if (tinytest_main(argc,argv,testgroups))
+ return 1;
return 0;
}
regress_threads(void *arg)
{
struct event_base *base;
- (void) arg;
+ (void) arg;
pthread_mutex_init(&count_lock, NULL);
- if (evthread_use_pthreads()<0)
+ if (evthread_use_pthreads()<0)
tt_abort_msg("Couldn't initialize pthreads!");
- base = event_base_new();
- if (evthread_make_base_notifiable(base)<0) {
- tt_abort_msg("Couldn't make base notifiable!");
- }
+ base = event_base_new();
+ if (evthread_make_base_notifiable(base)<0) {
+ tt_abort_msg("Couldn't make base notifiable!");
+ }
pthread_basic(base);
event_base_free(base);
end:
- ;
+ ;
}
if (evhttp_make_request(evcon, req,
EVHTTP_REQ_POST,
"/.rpc.Message") == -1) {
- tt_abort();
+ tt_abort();
}
test_ok = 0;
rpc_teardown(base);
- tt_assert(test_ok == 1);
+ tt_assert(test_ok == 1);
end:
evhttp_free(http);
rpc_setup(&http, &port, &base);
evcon = evhttp_connection_new("127.0.0.1", port);
- tt_assert(evcon);
+ tt_assert(evcon);
/*
* At this point, we want to schedule an HTTP POST request
event_dispatch();
- tt_assert(test_ok == 1);
+ tt_assert(test_ok == 1);
/* we do it twice to make sure that reuse works correctly */
kill_clear(kill);
event_dispatch();
- tt_assert(test_ok == 2);
+ tt_assert(test_ok == 2);
/* we do it trice to make sure other stuff works, too */
kill_clear(kill);
rpc_teardown(base);
- tt_assert(test_ok == 3);
+ tt_assert(test_ok == 3);
end:
- if (msg)
- msg_free(msg);
- if (kill)
- kill_free(kill);
+ if (msg)
+ msg_free(msg);
+ if (kill)
+ kill_free(kill);
- if (pool)
- evrpc_pool_free(pool);
- if (http)
- evhttp_free(http);
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
need_input_hook = 0;
need_output_hook = 0;
rpc_teardown(base);
- tt_assert(test_ok == 2);
+ tt_assert(test_ok == 2);
end:
- if (msg)
- msg_free(msg);
- if (kill_one)
- kill_free(kill_one);
- if (kill_two)
- kill_free(kill_two);
-
- if (pool)
- evrpc_pool_free(pool);
- if (http)
- evhttp_free(http);
+ if (msg)
+ msg_free(msg);
+ if (kill_one)
+ kill_free(kill_one);
+ if (kill_two)
+ kill_free(kill_two);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
}
static void
event_dispatch();
- tt_int_op(test_ok, ==, 1);
- tt_int_op(hook_pause_cb_called, ==, 4);
+ tt_int_op(test_ok, ==, 1);
+ tt_int_op(hook_pause_cb_called, ==, 4);
end:
- if (base)
- rpc_teardown(base);
-
- if (msg)
- msg_free(msg);
- if (kill)
- kill_free(kill);
-
- if (pool)
- evrpc_pool_free(pool);
- if (http)
- evhttp_free(http);
+ if (base)
+ rpc_teardown(base);
+
+ if (msg)
+ msg_free(msg);
+ if (kill)
+ kill_free(kill);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
}
static void
rpc_teardown(base);
- tt_assert(test_ok == 2);
+ tt_assert(test_ok == 2);
end:
- if (msg)
- msg_free(msg);
- if (kill)
- kill_free(kill);
-
- if (pool)
- evrpc_pool_free(pool);
- if (http)
- evhttp_free(http);
+ if (msg)
+ msg_free(msg);
+ if (kill)
+ kill_free(kill);
+
+ if (pool)
+ evrpc_pool_free(pool);
+ if (http)
+ evhttp_free(http);
}
static void
msg2 = msg_new();
if (evtag_unmarshal_msg(tmp, 0xdeaf, msg2) == -1)
- tt_abort_msg("Failed to unmarshal message.");
+ tt_abort_msg("Failed to unmarshal message.");
evutil_gettimeofday(&tv_end, NULL);
evutil_timersub(&tv_end, &tv_start, &tv_end);
- TT_BLATHER(("(%.1f us/add) ",
- (float)tv_end.tv_sec/(float)i * 1000000.0 +
- tv_end.tv_usec / (float)i));
+ TT_BLATHER(("(%.1f us/add) ",
+ (float)tv_end.tv_sec/(float)i * 1000000.0 +
+ tv_end.tv_usec / (float)i));
if (!EVTAG_HAS(msg2, from_name) ||
!EVTAG_HAS(msg2, to_name) ||
!EVTAG_HAS(msg2, attack)) {
- tt_abort_msg("Missing data structures.");
+ tt_abort_msg("Missing data structures.");
}
if (EVTAG_GET(msg2, attack, &attack) == -1) {
}
if (EVTAG_ARRAY_LEN(msg2, run) != i) {
- tt_abort_msg("Wrong number of run messages.");
+ tt_abort_msg("Wrong number of run messages.");
}
/* get the very first run message */
tt_uint_op(short_number, ==, 0xdead0a0b);
}
- tt_int_op(EVTAG_ARRAY_LEN(attack, how_often), ==, 3);
+ tt_int_op(EVTAG_ARRAY_LEN(attack, how_often), ==, 3);
for (i = 0; i < 3; ++i) {
ev_uint32_t res;
}
}
- test_ok = 1;
+ test_ok = 1;
end:
- if (msg)
- msg_free(msg);
- if (msg2)
- msg_free(msg2);
- if (tmp)
- evbuffer_free(tmp);
+ if (msg)
+ msg_free(msg);
+ if (msg2)
+ msg_free(msg2);
+ if (tmp)
+ evbuffer_free(tmp);
}
#define RPC_LEGACY(name) \
{ #name, run_legacy_test_fn, TT_FORK|TT_NEED_BASE|TT_LEGACY, \
- &legacy_setup, \
- rpc_##name }
+ &legacy_setup, \
+ rpc_##name }
struct testcase_t rpc_testcases[] = {
- RPC_LEGACY(basic_test),
- RPC_LEGACY(basic_message),
- RPC_LEGACY(basic_client),
- RPC_LEGACY(basic_queued_client),
- RPC_LEGACY(basic_client_with_pause),
- RPC_LEGACY(client_timeout),
- RPC_LEGACY(test),
-
- END_OF_TESTCASES,
+ RPC_LEGACY(basic_test),
+ RPC_LEGACY(basic_message),
+ RPC_LEGACY(basic_client),
+ RPC_LEGACY(basic_queued_client),
+ RPC_LEGACY(basic_client_with_pause),
+ RPC_LEGACY(client_timeout),
+ RPC_LEGACY(test),
+
+ END_OF_TESTCASES,
};
{ "bufferevent_connect", regress_bufferevent_openssl_connect,
TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
- END_OF_TESTCASES,
+ END_OF_TESTCASES,
};
struct sockaddr_in my_addr;
sock = socket(AF_INET, SOCK_DGRAM, 0);
- if (sock <= 0) {
- tt_abort_perror("socket");
- }
+ if (sock <= 0) {
+ tt_abort_perror("socket");
+ }
- evutil_make_socket_nonblocking(sock);
+ evutil_make_socket_nonblocking(sock);
memset(&my_addr, 0, sizeof(my_addr));
my_addr.sin_family = AF_INET;
void
regress_clean_dnsserver(void)
{
- if (dns_port)
- evdns_close_server_port(dns_port);
- if (dns_sock >= 0)
- EVUTIL_CLOSESOCKET(dns_sock);
+ if (dns_port)
+ evdns_close_server_port(dns_port);
+ if (dns_sock >= 0)
+ EVUTIL_CLOSESOCKET(dns_sock);
}
void
{ "ffff", { 0, 0, 0, 0 }, BAD },
{ "fffff::", { 0, 0, 0, 0 }, BAD },
{ "fffff::", { 0, 0, 0, 0 }, BAD },
- { "::1.0.1.1000", { 0, 0, 0, 0 }, BAD },
+ { "::1.0.1.1000", { 0, 0, 0, 0 }, BAD },
{ "1:2:33333:4::", { 0, 0, 0, 0 }, BAD },
{ "1:2:3:4:5:6:7:8:9", { 0, 0, 0, 0 }, BAD },
{ "1::2::3", { 0, 0, 0, 0 }, BAD },
for (i = 0; sa_port_ents[i].parse; ++i) {
struct sa_port_ent *ent = &sa_port_ents[i];
- int len = sizeof(ss);
+ int len = sizeof(ss);
memset(&ss, 0, sizeof(ss));
r = evutil_parse_sockaddr_port(ent->parse, (struct sockaddr*)&ss, &len);
if (r < 0) {
sin.sin_family = AF_INET;
sin.sin_port = htons(ent->port);
r = evutil_inet_pton(AF_INET, ent->addr, &sin.sin_addr);
- if (1 != r) {
+ if (1 != r) {
TT_FAIL(("Couldn't parse ipv4 target %s.", ent->addr));
} else if (memcmp(&sin, &ss, sizeof(sin))) {
TT_FAIL(("Parse for %s was not as expected.", ent->parse));
} else if (len != sizeof(sin)) {
- TT_FAIL(("Length for %s not as expected.",ent->parse));
- }
+ TT_FAIL(("Length for %s not as expected.",ent->parse));
+ }
} else {
struct sockaddr_in6 sin6;
memset(&sin6, 0, sizeof(sin6));
} else if (memcmp(&sin6, &ss, sizeof(sin6))) {
TT_FAIL(("Parse for %s was not as expected.", ent->parse));
} else if (len != sizeof(sin6)) {
- TT_FAIL(("Length for %s not as expected.",ent->parse));
+ TT_FAIL(("Length for %s not as expected.",ent->parse));
}
}
}
int is_loopback;
} sa_pred_entries[] = {
- { "127.0.0.1", 1 },
- { "127.0.3.2", 1 },
- { "128.1.2.3", 0 },
- { "18.0.0.1", 0 },
+ { "127.0.0.1", 1 },
+ { "127.0.3.2", 1 },
+ { "128.1.2.3", 0 },
+ { "18.0.0.1", 0 },
{ "129.168.1.1", 0 },
- { "::1", 1 },
- { "::0", 0 },
- { "f::1", 0 },
- { "::501", 0 },
- { NULL, 0 },
+ { "::1", 1 },
+ { "::0", 0 },
+ { "f::1", 0 },
+ { "::501", 0 },
+ { NULL, 0 },
};
for (i=0; sa_pred_entries[i].parse; ++i) {
struct sa_pred_ent *ent = &sa_pred_entries[i];
- int len = sizeof(ss);
+ int len = sizeof(ss);
r = evutil_parse_sockaddr_port(ent->parse, (struct sockaddr*)&ss, &len);
static void
test_evutil_strtoll(void *ptr)
{
- const char *s;
- char *endptr;
+ const char *s;
+ char *endptr;
- tt_want(evutil_strtoll("5000000000", NULL, 10) ==
+ tt_want(evutil_strtoll("5000000000", NULL, 10) ==
((ev_int64_t)5000000)*1000);
- tt_want(evutil_strtoll("-5000000000", NULL, 10) ==
+ tt_want(evutil_strtoll("-5000000000", NULL, 10) ==
((ev_int64_t)5000000)*-1000);
s = " 99999stuff";
tt_want(evutil_strtoll(s, &endptr, 10) == (ev_int64_t)99999);
char buf[16];
int r;
r = evutil_snprintf(buf, sizeof(buf), "%d %d", 50, 100);
- tt_str_op(buf, ==, "50 100");
- tt_int_op(r, ==, 6);
+ tt_str_op(buf, ==, "50 100");
+ tt_int_op(r, ==, 6);
r = evutil_snprintf(buf, sizeof(buf), "longish %d", 1234567890);
- tt_str_op(buf, ==, "longish 1234567");
- tt_int_op(r, ==, 18);
+ tt_str_op(buf, ==, "longish 1234567");
+ tt_int_op(r, ==, 18);
end:
;
event_debug(("A millisecond passed! We should log that!"));
#ifdef USE_DEBUG
- LOGEQ(_EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
+ LOGEQ(_EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
#else
tt_int_op(logsev,==,0);
tt_ptr_op(logmsg,==,NULL);
int socktype, int protocol, int line)
{
struct sockaddr_storage ss;
- int slen = sizeof(ss);
+ int slen = sizeof(ss);
int gotport;
char buf[128];
memset(&ss, 0, sizeof(ss));
} while (evbuffer_get_length(src) > 0);
- ++infilter_calls;
+ ++infilter_calls;
return (BEV_OK);
}
if (evbuffer_get_length(evbuf) == 8333) {
++readcb_finished;
- }
+ }
evbuffer_free(evbuf);
}
{
if (evbuffer_get_length(bufferevent_get_output(bev)) == 0) {
++writecb_finished;
- }
+ }
}
static void
char buffer[8333];
z_stream z_input, z_output;
int i, pair[2]={-1,-1}, r;
- (void)arg;
+ (void)arg;
infilter_calls = outfilter_calls = readcb_finished = writecb_finished
- = errorcb_invoked = 0;
+ = errorcb_invoked = 0;
if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
tt_abort_perror("socketpair");
event_dispatch();
- tt_want(infilter_calls);
- tt_want(outfilter_calls);
- tt_want(readcb_finished);
- tt_want(writecb_finished);
- tt_want(!errorcb_invoked);
+ tt_want(infilter_calls);
+ tt_want(outfilter_calls);
+ tt_want(readcb_finished);
+ tt_want(writecb_finished);
+ tt_want(!errorcb_invoked);
- test_ok = 1;
+ test_ok = 1;
end:
- if (bev1)
- bufferevent_free(bev1);
- if (bev2)
+ if (bev1)
+ bufferevent_free(bev1);
+ if (bev2)
bufferevent_free(bev2);
if (pair[0] >= 0)
" -d INT: Duration of the test in seconds (default: 5 sec)\n");
fprintf(stderr,
" -c INT: Connection-rate limit applied to each connection in bytes per second\n"
-" (default: None.)\n"
+" (default: None.)\n"
" -g INT: Group-rate limit applied to sum of all usage in bytes per second\n"
-" (default: None.)\n"
+" (default: None.)\n"
" -t INT: Granularity of timing, in milliseconds (default: 1000 msec)\n");
}
int outcome;
if (testcase->setup) {
env = testcase->setup->setup_fn(testcase);
- if (!env)
+ if (!env)
return FAIL;
else if (env == (void*)TT_SKIP)
return SKIP;
#else
int outcome_pipe[2];
pid_t pid;
- (void)group;
+ (void)group;
if (pipe(outcome_pipe))
perror("opening pipe");
test_r = _testcase_run_bare(testcase);
assert(0<=(int)test_r && (int)test_r<=2);
b[0] = "NYS"[test_r];
- write_r = write(outcome_pipe[1], b, 1);
+ write_r = write(outcome_pipe[1], b, 1);
if (write_r != 1) {
perror("write outcome to pipe");
exit(1);
/* Every group has a 'prefix', and an array of tests. That's it. */
{ "demo/", demo_tests },
- END_OF_GROUPS
+ END_OF_GROUPS
};
#define tt_fail() TT_FAIL(("%s", "(Failed.)"))
/* End the current test, and indicate we are skipping it. */
-#define tt_skip() \
+#define tt_skip() \
TT_STMT_BEGIN \
_tinytest_set_test_skipped(); \
TT_EXIT_TEST_FUNCTION; \
* when you care about ASCII's notion of character types, because you are about
* to send those types onto the wire.
*/
-#define DECLARE_CTYPE_FN(name) \
- static int EVUTIL_##name(char c); \
- extern const ev_uint32_t EVUTIL_##name##_TABLE[]; \
- static inline int EVUTIL_##name(char c) { \
- ev_uint8_t u = c; \
- return !!(EVUTIL_##name##_TABLE[(u >> 5) & 7] & (1 << (u & 31))); \
- }
+#define DECLARE_CTYPE_FN(name) \
+ static int EVUTIL_##name(char c); \
+ extern const ev_uint32_t EVUTIL_##name##_TABLE[]; \
+ static inline int EVUTIL_##name(char c) { \
+ ev_uint8_t u = c; \
+ return !!(EVUTIL_##name##_TABLE[(u >> 5) & 7] & (1 << (u & 31))); \
+ }
DECLARE_CTYPE_FN(ISALPHA)
DECLARE_CTYPE_FN(ISALNUM)
DECLARE_CTYPE_FN(ISSPACE)
our half-baked C OO. Example:
struct subtype {
- int x;
- struct supertype common;
- int y;
+ int x;
+ struct supertype common;
+ int y;
};
...
void fn(struct supertype *super) {
- struct subtype *sub = EVUTIL_UPCAST(super, struct subtype, common);
- ...
+ struct subtype *sub = EVUTIL_UPCAST(super, struct subtype, common);
+ ...
}
*/
#define EVUTIL_UPCAST(ptr, type, field) \
return (winop);
err:
- XFREE(winop->readset_in);
- XFREE(winop->writeset_in);
- XFREE(winop->readset_out);
- XFREE(winop->writeset_out);
- XFREE(winop->exset_out);
- XFREE(winop);
- return (NULL);
+ XFREE(winop->readset_in);
+ XFREE(winop->writeset_in);
+ XFREE(winop->readset_out);
+ XFREE(winop->writeset_out);
+ XFREE(winop->exset_out);
+ XFREE(winop);
+ return (NULL);
}
int
fd_set_copy(win32op->writeset_out, win32op->writeset_in);
fd_count =
- (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
+ (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
win32op->readset_out->fd_count : win32op->writeset_out->fd_count;
if (!fd_count) {