-#if !defined(_EVENT_EVCONFIG__PRIVATE_H) && !defined(__MINGW32__)
-#define _EVENT_EVCONFIG__PRIVATE_H
+#if !defined(EVENT_EVCONFIG__PRIVATE_H_) && !defined(__MINGW32__)
+#define EVENT_EVCONFIG__PRIVATE_H_
/* Nothing to see here. Move along. */
/*
 * Do not rely on macros in this file existing in later versions.
 */
-#ifndef _EVENT_CONFIG_H_
-#define _EVENT_CONFIG_H_
+#ifndef EVENT_CONFIG_H__
+#define EVENT_CONFIG_H__
/* config.h. Generated by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
/* Define if no secure id variant is available */
/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
-#define _EVENT_DNS_USE_FTIME_FOR_ID 1
+#define EVENT__DNS_USE_FTIME_FOR_ID 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
/* #undef EVENT__HAVE_ARPA_INET_H */
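
The renaming rule applied throughout this patch: C reserves every identifier
that begins with an underscore followed by an uppercase letter (C++ also
reserves any identifier containing a double underscore), so leading
underscores move to the end of each internal name, while the
autoconf-generated config macros take the EVENT__ prefix instead. A minimal
before/after sketch with hypothetical names:

	/* Before: reserved -- leading underscore plus uppercase letter. */
	#ifndef _EXAMPLE_HEADER_H
	#define _EXAMPLE_HEADER_H
	void _example_internal(void);
	#endif

	/* After: the underscore moves to the end, which is unreserved in C. */
	#ifndef EXAMPLE_HEADER_H_
	#define EXAMPLE_HEADER_H_
	void example_internal_(void);
	#endif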
arc4random_stir(void)
{
int val;
- _ARC4_LOCK();
+ ARC4_LOCK_();
val = arc4_stir();
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
return val;
}
#endif
arc4random_addrandom(const unsigned char *dat, int datlen)
{
int j;
- _ARC4_LOCK();
+ ARC4_LOCK_();
if (!rs_initialized)
arc4_stir();
for (j = 0; j < datlen; j += 256) {
* crazy like passing us all the files in /var/log. */
arc4_addrandom(dat + j, datlen - j);
}
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
}
#endif
arc4random(void)
{
ARC4RANDOM_UINT32 val;
- _ARC4_LOCK();
+ ARC4_LOCK_();
arc4_count -= 4;
arc4_stir_if_needed();
val = arc4_getword();
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
return val;
}
#endif
arc4random_buf(void *_buf, size_t n)
{
unsigned char *buf = _buf;
- _ARC4_LOCK();
+ ARC4_LOCK_();
arc4_stir_if_needed();
while (n--) {
if (--arc4_count <= 0)
arc4_stir();
buf[n] = arc4_getbyte();
}
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
}
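
These locked wrappers behave like the BSD arc4random API. A minimal usage
sketch (the key buffer is illustrative):

	unsigned char key[32];
	arc4random_buf(key, sizeof(key));   /* fill key with 32 random bytes */
	ARC4RANDOM_UINT32 r = arc4random(); /* one 32-bit random value */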
#ifndef ARC4RANDOM_NOUNIFORM
/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do { \
(ptr)->pos = -1; \
- (ptr)->_internal.chain = NULL; \
- (ptr)->_internal.pos_in_chain = 0; \
+ (ptr)->internal_.chain = NULL; \
+ (ptr)->internal_.pos_in_chain = 0; \
} while (0)
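
PTR_NOT_FOUND is what callers of the evbuffer search functions observe on a
miss: pos becomes -1 and the private position fields are cleared. A short
sketch against the public API (the pattern searched for is illustrative):

	struct evbuffer_ptr p = evbuffer_search(buf, "\r\n", 2, NULL);
	if (p.pos == -1) {
		/* not found: PTR_NOT_FOUND cleared the internal_ fields */
	}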
static void evbuffer_chain_align(struct evbuffer_chain *chain);
EVUTIL_ASSERT(info->parent != NULL);
EVBUFFER_LOCK(info->source);
evbuffer_chain_free(info->parent);
- _evbuffer_decref_and_unlock(info->source);
+ evbuffer_decref_and_unlock_(info->source);
}
mm_free(chain);
}
void
-_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
+evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
EVUTIL_ASSERT((chain->flags & flag) == 0);
chain->flags |= flag;
}
void
-_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
+evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
EVUTIL_ASSERT((chain->flags & flag) != 0);
chain->flags &= ~flag;
}
void
-_evbuffer_incref(struct evbuffer *buf)
+evbuffer_incref_(struct evbuffer *buf)
{
EVBUFFER_LOCK(buf);
++buf->refcnt;
}
void
-_evbuffer_incref_and_lock(struct evbuffer *buf)
+evbuffer_incref_and_lock_(struct evbuffer *buf)
{
EVBUFFER_LOCK(buf);
++buf->refcnt;
if (buffer->deferred_cbs) {
if (buffer->deferred.queued)
return;
- _evbuffer_incref_and_lock(buffer);
+ evbuffer_incref_and_lock_(buffer);
if (buffer->parent)
bufferevent_incref(buffer->parent);
EVBUFFER_UNLOCK(buffer);
EVBUFFER_LOCK(buffer);
parent = buffer->parent;
evbuffer_run_callbacks(buffer, 1);
- _evbuffer_decref_and_unlock(buffer);
+ evbuffer_decref_and_unlock_(buffer);
if (parent)
bufferevent_decref(parent);
}
}
void
-_evbuffer_decref_and_unlock(struct evbuffer *buffer)
+evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
struct evbuffer_chain *chain, *next;
ASSERT_EVBUFFER_LOCKED(buffer);
evbuffer_free(struct evbuffer *buffer)
{
EVBUFFER_LOCK(buffer);
- _evbuffer_decref_and_unlock(buffer);
+ evbuffer_decref_and_unlock_(buffer);
}
void
to_alloc += vec[n].iov_len;
}
- if (_evbuffer_expand_fast(buf, to_alloc, 2) < 0) {
+ if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
goto done;
}
for (n = 0; n < n_vec; n++) {
/* XXX each 'add' call here does a bunch of setup that's
- * obviated by _evbuffer_expand_fast, and some cleanup that we
+ * obviated by evbuffer_expand_fast_, and some cleanup that we
* would like to do only once. Instead we should just extract
* the part of the code that's needed. */
EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
n = 1;
} else {
- if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
+ if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
goto done;
- n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
+ n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
&chainp, 0);
}
/* reference evbuffer containing source chain so it
* doesn't get released while the chain is still
 * being referenced. */
- _evbuffer_incref(src);
+ evbuffer_incref_(src);
extra->source = src;
/* reference source chain which now becomes immutable */
evbuffer_chain_incref(chain);
EVBUFFER_LOCK(buf);
if (pos) {
- chain = pos->_internal.chain;
- pos_in_chain = pos->_internal.pos_in_chain;
+ chain = pos->internal_.chain;
+ pos_in_chain = pos->internal_.pos_in_chain;
if (datlen + pos->pos > buf->total_len)
datlen = buf->total_len - pos->pos;
} else {
static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
- struct evbuffer_chain *chain = it->_internal.chain;
- size_t i = it->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
while (chain != NULL) {
char *buffer = (char *)chain->buffer + chain->misalign;
char *cp = memchr(buffer+i, chr, chain->off-i);
if (cp) {
- it->_internal.chain = chain;
- it->_internal.pos_in_chain = cp - buffer;
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
it->pos += (cp - buffer - i);
return it->pos;
}
static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
- struct evbuffer_chain *chain = it->_internal.chain;
- size_t i = it->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
while (chain != NULL) {
char *buffer = (char *)chain->buffer + chain->misalign;
char *cp = find_eol_char(buffer+i, chain->off-i);
if (cp) {
- it->_internal.chain = chain;
- it->_internal.pos_in_chain = cp - buffer;
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
it->pos += (cp - buffer) - i;
return it->pos;
}
struct evbuffer_ptr *ptr, const char *chrset)
{
int count = 0;
- struct evbuffer_chain *chain = ptr->_internal.chain;
- size_t i = ptr->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = ptr->internal_.chain;
+ size_t i = ptr->internal_.pos_in_chain;
if (!chain)
return 0;
if (buffer[i] == *p++)
goto next;
}
- ptr->_internal.chain = chain;
- ptr->_internal.pos_in_chain = i;
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
ptr->pos += count;
return count;
next:
i = 0;
if (! chain->next) {
- ptr->_internal.chain = chain;
- ptr->_internal.pos_in_chain = i;
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
ptr->pos += count;
return count;
}
static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
- struct evbuffer_chain *chain = it->_internal.chain;
- size_t off = it->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t off = it->internal_.pos_in_chain;
if (chain == NULL)
return -1;
int ok = 0;
/* Avoid locking in trivial edge cases */
- if (start && start->_internal.chain == NULL) {
+ if (start && start->internal_.chain == NULL) {
PTR_NOT_FOUND(&it);
if (eol_len_out)
*eol_len_out = extra_drain;
memcpy(&it, start, sizeof(it));
} else {
it.pos = 0;
- it._internal.chain = buffer->first;
- it._internal.pos_in_chain = 0;
+ it.internal_.chain = buffer->first;
+ it.internal_.pos_in_chain = 0;
}
/* the eol_style determines our first stop character and how many
/* Make sure that datlen bytes are available for writing in the last n
* chains. Never copies or moves data. */
int
-_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
+evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
struct evbuffer_chain *chain = buf->last, *tmp, *next;
size_t avail;
@return The number of buffers we're using.
*/
int
-_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
+evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
struct evbuffer_iovec *vecs, int n_vecs_avail,
struct evbuffer_chain ***chainp, int exact)
{
#ifdef USE_IOVEC_IMPL
/* Since we can use iovecs, we're willing to use the last
* NUM_READ_IOVEC chains. */
- if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
+ if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
result = -1;
goto done;
} else {
IOV_TYPE vecs[NUM_READ_IOVEC];
-#ifdef _EVBUFFER_IOVEC_IS_NATIVE
- nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
+#ifdef EVBUFFER_IOVEC_IS_NATIVE_
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
NUM_READ_IOVEC, &chainp, 1);
#else
/* We aren't using the native struct iovec. Therefore,
we are on win32. */
struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
- nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
&chainp, 1);
for (i=0; i < nvecs; ++i)
{
if (howfar > (size_t)pos->pos)
return -1;
- if (pos->_internal.chain && howfar <= pos->_internal.pos_in_chain) {
- pos->_internal.pos_in_chain -= howfar;
+ if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
+ pos->internal_.pos_in_chain -= howfar;
pos->pos -= howfar;
return 0;
} else {
case EVBUFFER_PTR_ADD:
/* this avoids iterating over all previous chains if
we just want to advance the position */
- chain = pos->_internal.chain;
+ chain = pos->internal_.chain;
pos->pos += position;
- position = pos->_internal.pos_in_chain;
+ position = pos->internal_.pos_in_chain;
break;
}
position = 0;
}
if (chain) {
- pos->_internal.chain = chain;
- pos->_internal.pos_in_chain = position + left;
+ pos->internal_.chain = chain;
+ pos->internal_.pos_in_chain = position + left;
} else if (left == 0) {
/* The first byte in the (nonexistent) chain after the last chain */
- pos->_internal.chain = NULL;
- pos->_internal.pos_in_chain = 0;
+ pos->internal_.chain = NULL;
+ pos->internal_.pos_in_chain = 0;
} else {
PTR_NOT_FOUND(pos);
result = -1;
if (pos->pos + len > buf->total_len)
return -1;
- chain = pos->_internal.chain;
- position = pos->_internal.pos_in_chain;
+ chain = pos->internal_.chain;
+ position = pos->internal_.pos_in_chain;
while (len && chain) {
size_t n_comparable;
if (len + position > chain->off)
if (start) {
memcpy(&pos, start, sizeof(pos));
- chain = pos._internal.chain;
+ chain = pos.internal_.chain;
} else {
pos.pos = 0;
- chain = pos._internal.chain = buffer->first;
- pos._internal.pos_in_chain = 0;
+ chain = pos.internal_.chain = buffer->first;
+ pos.internal_.pos_in_chain = 0;
}
if (end)
- last_chain = end->_internal.chain;
+ last_chain = end->internal_.chain;
if (!len || len > EV_SSIZE_MAX)
goto done;
while (chain) {
const unsigned char *start_at =
chain->buffer + chain->misalign +
- pos._internal.pos_in_chain;
+ pos.internal_.pos_in_chain;
p = memchr(start_at, first,
- chain->off - pos._internal.pos_in_chain);
+ chain->off - pos.internal_.pos_in_chain);
if (p) {
pos.pos += p - start_at;
- pos._internal.pos_in_chain += p - start_at;
+ pos.internal_.pos_in_chain += p - start_at;
if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
if (end && pos.pos + (ev_ssize_t)len > end->pos)
goto not_found;
goto done;
}
++pos.pos;
- ++pos._internal.pos_in_chain;
- if (pos._internal.pos_in_chain == chain->off) {
- chain = pos._internal.chain = chain->next;
- pos._internal.pos_in_chain = 0;
+ ++pos.internal_.pos_in_chain;
+ if (pos.internal_.pos_in_chain == chain->off) {
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
}
} else {
if (chain == last_chain)
goto not_found;
- pos.pos += chain->off - pos._internal.pos_in_chain;
- chain = pos._internal.chain = chain->next;
- pos._internal.pos_in_chain = 0;
+ pos.pos += chain->off - pos.internal_.pos_in_chain;
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
}
}
ev_ssize_t len_so_far = 0;
/* Avoid locking in trivial edge cases */
- if (start_at && start_at->_internal.chain == NULL)
+ if (start_at && start_at->internal_.chain == NULL)
return 0;
EVBUFFER_LOCK(buffer);
if (start_at) {
- chain = start_at->_internal.chain;
+ chain = start_at->internal_.chain;
len_so_far = chain->off
- - start_at->_internal.pos_in_chain;
+ - start_at->internal_.pos_in_chain;
idx = 1;
if (n_vec > 0) {
vec[0].iov_base = chain->buffer + chain->misalign
- + start_at->_internal.pos_in_chain;
+ + start_at->internal_.pos_in_chain;
vec[0].iov_len = len_so_far;
}
chain = chain->next;
for (i = 0; i < eo->n_buffers; ++i) {
EVUTIL_ASSERT(chain);
next = chain->next;
- _evbuffer_chain_unpin(chain, flag);
+ evbuffer_chain_unpin_(chain, flag);
chain = next;
}
}
evbuffer_invoke_callbacks(evbuf);
- _evbuffer_decref_and_unlock(evbuf);
+ evbuffer_decref_and_unlock_(evbuf);
}
void
evbuffer_drain(evbuf, nBytes);
pin_release(buf,EVBUFFER_MEM_PINNED_W);
buf->write_in_progress = 0;
- _evbuffer_decref_and_unlock(evbuf);
+ evbuffer_decref_and_unlock_(evbuf);
}
struct evbuffer *
for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
WSABUF *b = &buf_o->buffers[i];
b->buf = (char*)( chain->buffer + chain->misalign );
- _evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_W);
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_W);
if ((size_t)at_most > chain->off) {
/* XXXX Cast is safe for now, since win32 has no
}
buf_o->n_buffers = i;
- _evbuffer_incref(buf);
+ evbuffer_incref_(buf);
if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
buf_o->n_buffers = 0;
memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
- if (_evbuffer_expand_fast(buf, at_most, MAX_WSABUFS) == -1)
+ if (evbuffer_expand_fast_(buf, at_most, MAX_WSABUFS) == -1)
goto done;
evbuffer_freeze(buf, 0);
- nvecs = _evbuffer_read_setup_vecs(buf, at_most,
+ nvecs = evbuffer_read_setup_vecs_(buf, at_most,
vecs, MAX_WSABUFS, &chainp, 1);
for (i=0;i<nvecs;++i) {
WSABUF_FROM_EVBUFFER_IOV(
npin=0;
for ( ; chain; chain = chain->next) {
- _evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_R);
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_R);
++npin;
}
EVUTIL_ASSERT(npin == nvecs);
- _evbuffer_incref(buf);
+ evbuffer_incref_(buf);
if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
}
evutil_socket_t
-_evbuffer_overlapped_get_fd(struct evbuffer *buf)
+evbuffer_overlapped_get_fd_(struct evbuffer *buf)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
return buf_o ? buf_o->fd : -1;
}
void
-_evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd)
+evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
EVBUFFER_LOCK(buf);
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count. */
-void _bufferevent_incref_and_lock(struct bufferevent *bufev);
+void bufferevent_incref_and_lock_(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev. Returns 1 if it freed
* the bufferevent.*/
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
* unlocking it otherwise. Returns 1 if it freed the bufferevent. */
-int _bufferevent_decref_and_unlock(struct bufferevent *bufev);
+int bufferevent_decref_and_unlock_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a read callback, schedule
* a readcb. Otherwise just run the readcb. */
-void _bufferevent_run_readcb(struct bufferevent *bufev);
+void bufferevent_run_readcb_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
* a writecb. Otherwise just run the writecb. */
-void _bufferevent_run_writecb(struct bufferevent *bufev);
+void bufferevent_run_writecb_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
* it to run with events "what". Otherwise just run the eventcb. */
-void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);
+void bufferevent_run_eventcb_(struct bufferevent *bufev, short what);
/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
* which case add ev with no timeout. */
-int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
+int bufferevent_add_event_(struct event *ev, const struct timeval *tv);
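
Together these give every backend one safe shape for invoking user callbacks:
take a reference and the lock, run the callback, then drop both. A sketch
(the callback name is hypothetical; the generic timeout callbacks later in
this patch follow exactly this shape):

	static void
	example_event_cb_(evutil_socket_t fd, short what, void *ctx)
	{
		struct bufferevent *bev = ctx;
		bufferevent_incref_and_lock_(bev);
		bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT);
		bufferevent_decref_and_unlock_(bev);
	}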
/* =========
* These next functions implement timeouts for bufferevents that aren't doing
/** Internal use: Set up the ev_read and ev_write callbacks so that
* the other "generic_timeout" functions will work on it. Call this from
* the constructor function. */
-void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
+void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
* Call this from the destructor function. */
-int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
+int bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
* (If an event is enabled and a timeout is set, we add the event. Otherwise
* we delete it.) Call this from anything that changes the timeout values,
 * that enables EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
-int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
+int bufferevent_generic_adj_timeouts_(struct bufferevent *bev);
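
The expected call sequence for the generic-timeout helpers, as the filter,
pair, and async backends use them (a sketch, not a complete backend):

	bufferevent_init_generic_timeout_cbs_(bev); /* in the constructor */
	/* ... after any change to timeouts or to EV_READ/EV_WRITE: */
	bufferevent_generic_adj_timeouts_(bev);
	/* ... */
	bufferevent_del_generic_timeout_cbs_(bev);  /* in the destructor */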
/** Internal use: We have just successfully read data into an inbuf, so
* reset the read timeout (if any). */
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
#ifdef EVENT__DISABLE_THREAD_SUPPORT
-#define BEV_LOCK(b) _EVUTIL_NIL_STMT
-#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
+#define BEV_LOCK(b) EVUTIL_NIL_STMT_
+#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do { \
/* ==== For rate-limiting. */
-int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
+int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev,
ev_ssize_t bytes);
-int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
+int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev,
ev_ssize_t bytes);
-ev_ssize_t _bufferevent_get_read_max(struct bufferevent_private *bev);
-ev_ssize_t _bufferevent_get_write_max(struct bufferevent_private *bev);
+ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev);
+ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev);
-int _bufferevent_ratelim_init(struct bufferevent_private *bev);
+int bufferevent_ratelim_init_(struct bufferevent_private *bev);
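
The read path consults the bucket before reading and charges it afterward; a
sketch of the sequence as the socket backend uses it (error handling elided,
fd assumed readable):

	ev_ssize_t readmax, res;
	readmax = bufferevent_get_read_max_(bufev_p);
	res = evbuffer_read(bufev->input, fd, (int)readmax);
	if (res > 0)
		bufferevent_decrement_read_buckets_(bufev_p, res);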
#ifdef __cplusplus
}
#include "evbuffer-internal.h"
#include "util-internal.h"
-static void _bufferevent_cancel_all(struct bufferevent *bev);
+static void bufferevent_cancel_all_(struct bufferevent *bev);
void
EVUTIL_SET_SOCKET_ERROR(err);
bufev->errorcb(bufev, what, bufev->cbarg);
}
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
static void
EVUTIL_SET_SOCKET_ERROR(err);
UNLOCKED(errorcb(bufev,what,cbarg));
}
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
#undef UNLOCKED
}
void
-_bufferevent_run_readcb(struct bufferevent *bufev)
+bufferevent_run_readcb_(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
}
void
-_bufferevent_run_writecb(struct bufferevent *bufev)
+bufferevent_run_writecb_(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
}
void
-_bufferevent_run_eventcb(struct bufferevent *bufev, short what)
+bufferevent_run_eventcb_(struct bufferevent *bufev, short what)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
bufev->be_ops = ops;
- _bufferevent_ratelim_init(bufev_private);
+ bufferevent_ratelim_init_(bufev_private);
/*
* Set to EV_WRITE so that using bufferevent_write is going to
short impl_events = event;
int r = 0;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (bufev_private->read_suspended)
impl_events &= ~EV_READ;
if (bufev_private->write_suspended)
if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
r = -1;
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
return r;
}
}
void
-_bufferevent_incref_and_lock(struct bufferevent *bufev)
+bufferevent_incref_and_lock_(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
BEV_UPCAST(bufev);
#endif
int
-_bufferevent_decref_and_unlock(struct bufferevent *bufev)
+bufferevent_decref_and_unlock_(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
bufferevent_decref(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
- return _bufferevent_decref_and_unlock(bufev);
+ return bufferevent_decref_and_unlock_(bufev);
}
void
{
BEV_LOCK(bufev);
bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
- _bufferevent_cancel_all(bufev);
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_cancel_all_(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
void
}
static void
-_bufferevent_cancel_all(struct bufferevent *bev)
+bufferevent_cancel_all_(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
memset(&d, 0, sizeof(d));
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
bufferevent_disable(bev, EV_READ);
- _bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
+ bufferevent_decref_and_unlock_(bev);
}
static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
bufferevent_disable(bev, EV_WRITE);
- _bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
+ bufferevent_decref_and_unlock_(bev);
}
void
-_bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
+bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
{
evtimer_assign(&bev->ev_read, bev->ev_base,
bufferevent_generic_read_timeout_cb, bev);
}
int
-_bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
+bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev)
{
int r1,r2;
r1 = event_del(&bev->ev_read);
}
int
-_bufferevent_generic_adj_timeouts(struct bufferevent *bev)
+bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
{
const short enabled = bev->enabled;
struct bufferevent_private *bev_p =
}
int
-_bufferevent_add_event(struct event *ev, const struct timeval *tv)
+bufferevent_add_event_(struct event *ev, const struct timeval *tv)
{
if (tv->tv_sec == 0 && tv->tv_usec == 0)
return event_add(ev, NULL);
}
/* For use by user programs only; internally, we should be calling
- either _bufferevent_incref_and_lock(), or BEV_LOCK. */
+ either bufferevent_incref_and_lock_(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
}
void
bufferevent_unlock(struct bufferevent *bev)
{
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
be_async_enable,
be_async_disable,
be_async_destruct,
- _bufferevent_generic_adj_timeouts,
+ bufferevent_generic_adj_timeouts_,
be_async_flush,
be_async_ctrl,
};
	/* This is safe so long as bufferevent_get_write_max_ never returns
	 * more than INT_MAX. That's true for now. XXXX */
- limit = (int)_bufferevent_get_write_max(&beva->bev);
+ limit = (int)bufferevent_get_write_max_(&beva->bev);
if (at_most >= (size_t)limit && limit >= 0)
at_most = limit;
&beva->write_overlapped)) {
bufferevent_decref(bev);
beva->ok = 0;
- _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
} else {
beva->write_in_progress = at_most;
- _bufferevent_decrement_write_buckets(&beva->bev, at_most);
+ bufferevent_decrement_write_buckets_(&beva->bev, at_most);
bev_async_add_write(beva);
}
}
}
/* XXXX This over-commits. */
- /* XXXX see also not above on cast on _bufferevent_get_write_max() */
- limit = (int)_bufferevent_get_read_max(&beva->bev);
+	/* XXXX see also note above on cast on bufferevent_get_write_max_() */
+ limit = (int)bufferevent_get_read_max_(&beva->bev);
if (at_most >= (size_t)limit && limit >= 0)
at_most = limit;
bufferevent_incref(bev);
if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
beva->ok = 0;
- _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
bufferevent_decref(bev);
} else {
beva->read_in_progress = at_most;
- _bufferevent_decrement_read_buckets(&beva->bev, at_most);
+ bufferevent_decrement_read_buckets_(&beva->bev, at_most);
bev_async_add_read(beva);
}
/* If we added data to the outbuf and were not writing before,
* we may want to write now. */
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
if (cbinfo->n_added)
bev_async_consider_writing(bev_async);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static void
/* If we drained data from the inbuf and were not reading before,
* we may want to read now */
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
if (cbinfo->n_deleted)
bev_async_consider_reading(bev_async);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static int
bev_async_del_read(bev_async);
bev_async_del_write(bev_async);
- fd = _evbuffer_overlapped_get_fd(bev->input);
+ fd = evbuffer_overlapped_get_fd_(bev->input);
if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
/* XXXX possible double-close */
evutil_closesocket(fd);
/* delete this in case non-blocking connect was used */
if (event_initialized(&bev->ev_write)) {
event_del(&bev->ev_write);
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
}
}
DWORD bytes, flags;
evutil_socket_t fd;
- fd = _evbuffer_overlapped_get_fd(bev->input);
+ fd = evbuffer_overlapped_get_fd_(bev->input);
WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
EVUTIL_ASSERT(bev_a->bev.connecting);
bev_a->bev.connecting = 0;
- sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
+ sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
/* XXXX Handle error? */
setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);
else
bev_async_set_wsa_error(bev, eo);
- _bufferevent_run_eventcb(bev,
+ bufferevent_run_eventcb_(bev,
ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);
event_base_del_virtual(bev->ev_base);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static void
evbuffer_commit_read(bev->input, nbytes);
bev_a->read_in_progress = 0;
if (amount_unread)
- _bufferevent_decrement_read_buckets(&bev_a->bev, -amount_unread);
+ bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);
if (!ok)
bev_async_set_wsa_error(bev, eo);
if (ok && nbytes) {
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
- _bufferevent_run_readcb(bev);
+ bufferevent_run_readcb_(bev);
bev_async_consider_reading(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
}
}
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static void
bev_a->write_in_progress = 0;
if (amount_unwritten)
- _bufferevent_decrement_write_buckets(&bev_a->bev,
+ bufferevent_decrement_write_buckets_(&bev_a->bev,
-amount_unwritten);
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
if (evbuffer_get_length(bev->output) <=
bev->wm_write.low)
- _bufferevent_run_writecb(bev);
+ bufferevent_run_writecb_(bev);
bev_async_consider_writing(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
}
}
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
struct bufferevent *
bev_a->ok = fd >= 0;
if (bev_a->ok)
- _bufferevent_init_generic_timeout_cbs(bev);
+ bufferevent_init_generic_timeout_cbs_(bev);
return bev;
err:
{
struct bufferevent_async *bev_async = upcast(bev);
bev_async->ok = 1;
- _bufferevent_init_generic_timeout_cbs(bev);
+ bufferevent_init_generic_timeout_cbs_(bev);
/* Now's a good time to consider reading/writing */
be_async_enable(bev, bev->enabled);
}
{
switch (op) {
case BEV_CTRL_GET_FD:
- data->fd = _evbuffer_overlapped_get_fd(bev->input);
+ data->fd = evbuffer_overlapped_get_fd_(bev->input);
return 0;
case BEV_CTRL_SET_FD: {
struct event_iocp_port *iocp;
- if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
+ if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
return 0;
if (!(iocp = event_base_get_iocp(bev->ev_base)))
return -1;
if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
return -1;
- _evbuffer_overlapped_set_fd(bev->input, data->fd);
- _evbuffer_overlapped_set_fd(bev->output, data->fd);
+ evbuffer_overlapped_set_fd_(bev->input, data->fd);
+ evbuffer_overlapped_set_fd_(bev->output, data->fd);
return 0;
}
case BEV_CTRL_CANCEL_ALL: {
struct bufferevent_async *bev_a = upcast(bev);
- evutil_socket_t fd = _evbuffer_overlapped_get_fd(bev->input);
+ evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
if (fd != (evutil_socket_t)INVALID_SOCKET &&
(bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
closesocket(fd);
be_filter_enable,
be_filter_disable,
be_filter_destruct,
- _bufferevent_generic_adj_timeouts,
+ bufferevent_generic_adj_timeouts_,
be_filter_flush,
be_filter_ctrl,
};
bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
bufferevent_filtered_outbuf_cb, bufev_f);
- _bufferevent_init_generic_timeout_cbs(downcast(bufev_f));
+ bufferevent_init_generic_timeout_cbs_(downcast(bufev_f));
bufferevent_incref(underlying);
bufferevent_enable(underlying, EV_READ|EV_WRITE);
}
}
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
}
static int
if (processed &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
/* call the write callback.*/
- _bufferevent_run_writecb(bufev);
+ bufferevent_run_writecb_(bufev);
if (res == BEV_OK &&
(bufev->enabled & EV_WRITE) &&
int processed_any = 0;
/* Somebody added more data to the output buffer. Try to
* process it, if we should. */
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
}
struct bufferevent *bufev = downcast(bevf);
int processed_any = 0;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (bevf->got_eof)
state = BEV_FINISHED;
* force readcb calls as needed. */
if (processed_any &&
evbuffer_get_length(bufev->input) >= bufev->wm_read.low)
- _bufferevent_run_readcb(bufev);
+ bufferevent_run_readcb_(bufev);
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
/* Called when the underlying socket has drained enough that we can write to
struct bufferevent *bev = downcast(bevf);
int processed_any = 0;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
/* Called when the underlying socket has given us an error */
struct bufferevent_filtered *bevf = _me;
struct bufferevent *bev = downcast(bevf);
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
	/* All we can really do is tell our own eventcb. */
- _bufferevent_run_eventcb(bev, what);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, what);
+ bufferevent_decref_and_unlock_(bev);
}
static int
int processed_any = 0;
EVUTIL_ASSERT(bevf);
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (iotype & EV_READ) {
be_filter_process_input(bevf, mode, &processed_any);
/* XXX does this want to recursively call lower-level flushes? */
bufferevent_flush(bevf->underlying, iotype, mode);
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
return processed_any;
}
} else {
struct bufferevent *bev = &bev_ssl->bev.bev;
int r;
- r = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
+ r = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (r == 0 && bev_ssl->read_blocked_on_write)
- r = _bufferevent_add_event(&bev->ev_write,
+ r = bufferevent_add_event_(&bev->ev_write,
&bev->timeout_write);
return r;
}
;
} else {
struct bufferevent *bev = &bev_ssl->bev.bev;
- r = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
if (!r && bev_ssl->write_blocked_on_read)
- r = _bufferevent_add_event(&bev->ev_read,
+ r = bufferevent_add_event_(&bev->ev_read,
&bev->timeout_read);
}
return r;
/* when is BEV_EVENT_{READING|WRITING} */
event = when | event;
- _bufferevent_run_eventcb(&bev_ssl->bev.bev, event);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event);
}
static void
unsigned long w = num_w - bev_ssl->counts.n_written;
unsigned long r = num_r - bev_ssl->counts.n_read;
if (w)
- _bufferevent_decrement_write_buckets(&bev_ssl->bev, w);
+ bufferevent_decrement_write_buckets_(&bev_ssl->bev, w);
if (r)
- _bufferevent_decrement_read_buckets(&bev_ssl->bev, r);
+ bufferevent_decrement_read_buckets_(&bev_ssl->bev, r);
bev_ssl->counts.n_written = num_w;
bev_ssl->counts.n_read = num_r;
}
int r, n, i, n_used = 0, blocked = 0, atmost;
struct evbuffer_iovec space[2];
- atmost = _bufferevent_get_read_max(&bev_ssl->bev);
+ atmost = bufferevent_get_read_max_(&bev_ssl->bev);
if (n_to_read > atmost)
n_to_read = atmost;
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (evbuffer_get_length(input) >= bev->wm_read.low)
- _bufferevent_run_readcb(bev);
+ bufferevent_run_readcb_(bev);
}
return blocked ? 0 : 1;
if (bev_ssl->last_write > 0)
atmost = bev_ssl->last_write;
else
- atmost = _bufferevent_get_write_max(&bev_ssl->bev);
+ atmost = bufferevent_get_write_max_(&bev_ssl->bev);
n = evbuffer_peek(output, atmost, NULL, space, 8);
if (n < 0)
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
if (evbuffer_get_length(output) <= bev->wm_write.low)
- _bufferevent_run_writecb(bev);
+ bufferevent_run_writecb_(bev);
}
return blocked ? 0 : 1;
}
}
/* Respect the rate limit */
- limit = _bufferevent_get_read_max(&bev->bev);
+ limit = bufferevent_get_read_max_(&bev->bev);
if (result > limit) {
result = limit;
}
eat it. */
}
if (event)
- _bufferevent_run_eventcb(&bev_ssl->bev.bev, event);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event);
}
static void
be_openssl_readeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
- _bufferevent_incref_and_lock(&bev_ssl->bev.bev);
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
- _bufferevent_run_eventcb(&bev_ssl->bev.bev,
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
} else
consider_reading(bev_ssl);
- _bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static void
be_openssl_writeeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
- _bufferevent_incref_and_lock(&bev_ssl->bev.bev);
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
- _bufferevent_run_eventcb(&bev_ssl->bev.bev,
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
}
consider_writing(bev_ssl);
- _bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static int
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, be_openssl_writeeventcb, bev_ssl);
if (rpending)
- r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (wpending)
- r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
if (fd >= 0) {
bev_ssl->fd_is_set = 1;
}
set_open_callbacks(bev_ssl, -1); /* XXXX handle failure */
/* Call do_read and do_write as needed */
bufferevent_enable(&bev_ssl->bev.bev, bev_ssl->bev.bev.enabled);
- _bufferevent_run_eventcb(&bev_ssl->bev.bev,
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_CONNECTED);
return 1;
} else {
{
struct bufferevent_openssl *bev_ssl = ptr;
- _bufferevent_incref_and_lock(&bev_ssl->bev.bev);
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
- _bufferevent_run_eventcb(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
} else
do_handshake(bev_ssl);/* XXX handle failure */
- _bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static int
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, be_openssl_handshakeeventcb, bev_ssl);
if (fd >= 0) {
- r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
- r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
bev_ssl->fd_is_set = 1;
}
return (r1<0 || r2<0) ? -1 : 0;
if (cbinfo->n_added && bev_ssl->state == BUFFEREVENT_SSL_OPEN) {
if (cbinfo->orig_size == 0)
- r = _bufferevent_add_event(&bev_ssl->bev.bev.ev_write,
+ r = bufferevent_add_event_(&bev_ssl->bev.bev.ev_write,
&bev_ssl->bev.bev.timeout_write);
consider_writing(bev_ssl);
}
struct bufferevent_openssl *bev_ssl = upcast(bev);
if (bev_ssl->underlying) {
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
} else {
event_del(&bev->ev_read);
event_del(&bev->ev_write);
struct bufferevent_openssl *bev_ssl = upcast(bev);
if (bev_ssl->underlying)
- return _bufferevent_generic_adj_timeouts(bev);
+ return bufferevent_generic_adj_timeouts_(bev);
else {
int r1=0, r2=0;
if (event_pending(&bev->ev_read, EV_READ, NULL))
- r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (event_pending(&bev->ev_write, EV_WRITE, NULL))
- r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
return (r1<0 || r2<0) ? -1 : 0;
}
}
bufferevent_enable_locking(&bev_ssl->bev.bev, NULL);
if (underlying) {
- _bufferevent_init_generic_timeout_cbs(&bev_ssl->bev.bev);
+ bufferevent_init_generic_timeout_cbs_(&bev_ssl->bev.bev);
bufferevent_incref(underlying);
}
incref_and_lock(struct bufferevent *b)
{
struct bufferevent_pair *bevp;
- _bufferevent_incref_and_lock(b);
+ bufferevent_incref_and_lock_(b);
bevp = upcast(b);
if (bevp->partner)
- _bufferevent_incref_and_lock(downcast(bevp->partner));
+ bufferevent_incref_and_lock_(downcast(bevp->partner));
}
static inline void
{
struct bufferevent_pair *bevp = upcast(b);
if (bevp->partner)
- _bufferevent_decref_and_unlock(downcast(bevp->partner));
- _bufferevent_decref_and_unlock(b);
+ bufferevent_decref_and_unlock_(downcast(bevp->partner));
+ bufferevent_decref_and_unlock_(b);
}
/* XXX Handle close */
return NULL;
}
- _bufferevent_init_generic_timeout_cbs(&bufev->bev.bev);
+ bufferevent_init_generic_timeout_cbs_(&bufev->bev.bev);
return bufev;
}
dst_size = evbuffer_get_length(dst->input);
if (dst_size >= dst->wm_read.low) {
- _bufferevent_run_readcb(dst);
+ bufferevent_run_readcb_(dst);
}
if (src_size <= src->wm_write.low) {
- _bufferevent_run_writecb(src);
+ bufferevent_run_writecb_(src);
}
done:
evbuffer_freeze(src->output, 1);
bev_p->partner = NULL;
}
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
}
static int
be_pair_transfer(bev, partner, 1);
if (mode == BEV_FINISHED) {
- _bufferevent_run_eventcb(partner, iotype|BEV_EVENT_EOF);
+ bufferevent_run_eventcb_(partner, iotype|BEV_EVENT_EOF);
}
decref_and_unlock(bev);
return 0;
be_pair_enable,
be_pair_disable,
be_pair_destruct,
- _bufferevent_generic_adj_timeouts,
+ bufferevent_generic_adj_timeouts_,
be_pair_flush,
NULL, /* ctrl */
};
#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)
-static int _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g);
-static int _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g);
-static void _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g);
-static void _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g);
+static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
+static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
/** Helper: figure out the maximum amount we should write if is_write, or
    the maximum amount we should read if not. Return that maximum, or
0 if our bucket is wholly exhausted.
*/
static inline ev_ssize_t
-_bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
+bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write)
{
/* needs lock on bev. */
ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read;
}
ev_ssize_t
-_bufferevent_get_read_max(struct bufferevent_private *bev)
+bufferevent_get_read_max_(struct bufferevent_private *bev)
{
- return _bufferevent_get_rlim_max(bev, 0);
+ return bufferevent_get_rlim_max_(bev, 0);
}
ev_ssize_t
-_bufferevent_get_write_max(struct bufferevent_private *bev)
+bufferevent_get_write_max_(struct bufferevent_private *bev)
{
- return _bufferevent_get_rlim_max(bev, 1);
+ return bufferevent_get_rlim_max_(bev, 1);
}
int
-_bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
+bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
bev->rate_limiting->group->rate_limit.read_limit -= bytes;
bev->rate_limiting->group->total_read += bytes;
if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
- _bev_group_suspend_reading(bev->rate_limiting->group);
+ bev_group_suspend_reading_(bev->rate_limiting->group);
} else if (bev->rate_limiting->group->read_suspended) {
- _bev_group_unsuspend_reading(bev->rate_limiting->group);
+ bev_group_unsuspend_reading_(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
}
int
-_bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
+bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
bev->rate_limiting->group->rate_limit.write_limit -= bytes;
bev->rate_limiting->group->total_written += bytes;
if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
- _bev_group_suspend_writing(bev->rate_limiting->group);
+ bev_group_suspend_writing_(bev->rate_limiting->group);
} else if (bev->rate_limiting->group->write_suspended) {
- _bev_group_unsuspend_writing(bev->rate_limiting->group);
+ bev_group_unsuspend_writing_(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
/** Stop reading on every bufferevent in <b>g</b> */
static int
-_bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
+bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
/** Stop writing on every bufferevent in <b>g</b> */
static int
-_bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
+bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
/** Timer callback invoked on a single bufferevent with one or more exhausted
buckets when they are ready to refill. */
static void
-_bev_refill_callback(evutil_socket_t fd, short what, void *arg)
+bev_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
unsigned tick;
struct timeval now;
/** Helper: grab a random element from a bufferevent group. */
static struct bufferevent_private *
-_bev_group_random_element(struct bufferevent_rate_limit_group *group)
+bev_group_random_element_(struct bufferevent_rate_limit_group *group)
{
int which;
struct bufferevent_private *bev;
EVUTIL_ASSERT(! LIST_EMPTY(&group->members));
- which = _evutil_weakrand() % group->n_members;
+ which = evutil_weakrand_() % group->n_members;
bev = LIST_FIRST(&group->members);
while (which--)
*/
#define FOREACH_RANDOM_ORDER(block) \
do { \
- first = _bev_group_random_element(g); \
+ first = bev_group_random_element_(g); \
for (bev = first; bev != LIST_END(&g->members); \
bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
block ; \
} while (0)
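
The unsuspend paths then walk the members in this randomized order, so the
same bufferevent does not always get first claim on a freshly refilled
bucket. Roughly (a sketch; the real loop also handles lock contention and
wrap-around):

	FOREACH_RANDOM_ORDER({
		if (bev->read_suspended)
			bufferevent_unsuspend_read(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
	});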
static void
-_bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
+bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
}
static void
-_bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
+bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
and unsuspend group members as needed.
*/
static void
-_bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
+bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
struct bufferevent_rate_limit_group *g = arg;
unsigned tick;
if (g->pending_unsuspend_read ||
(g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
- _bev_group_unsuspend_reading(g);
+ bev_group_unsuspend_reading_(g);
}
if (g->pending_unsuspend_write ||
(g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
- _bev_group_unsuspend_writing(g);
+ bev_group_unsuspend_writing_(g);
}
/* XXXX Rather than waiting to the next tick to unsuspend stuff
event_del(&rlim->refill_bucket_event);
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
- _bev_refill_callback, bevp);
+ bev_refill_callback_, bevp);
if (rlim->limit.read_limit > 0) {
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
ev_token_bucket_init(&g->rate_limit, cfg, tick, 0);
event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
- _bev_group_refill_callback, g);
+ bev_group_refill_callback_, g);
/*XXXX handle event_add failure */
event_add(&g->master_refill_event, &cfg->tick_timeout);
return -1;
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
- _bev_refill_callback, bevp);
+ bev_refill_callback_, bevp);
bevp->rate_limiting = rlim;
}
* === */
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_read_max() is more likely what you want*/
+ * bufferevent_get_read_max_() is more likely what you want. */
ev_ssize_t
bufferevent_get_read_limit(struct bufferevent *bev)
{
}
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_write_max() is more likely what you want*/
+ * bufferevent_get_write_max_() is more likely what you want. */
ev_ssize_t
bufferevent_get_write_limit(struct bufferevent *bev)
{
{
ev_ssize_t r;
BEV_LOCK(bev);
- r = _bufferevent_get_read_max(BEV_UPCAST(bev));
+ r = bufferevent_get_read_max_(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
{
ev_ssize_t r;
BEV_LOCK(bev);
- r = _bufferevent_get_write_max(BEV_UPCAST(bev));
+ r = bufferevent_get_write_max_(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_read_max() is more likely what you want*/
+ * bufferevent_get_read_max_() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_read_limit(
struct bufferevent_rate_limit_group *grp)
}
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_write_max() is more likely what you want. */
+ * bufferevent_get_write_max_() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_write_limit(
struct bufferevent_rate_limit_group *grp)
new_limit = (grp->rate_limit.read_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
- _bev_group_suspend_reading(grp);
+ bev_group_suspend_reading_(grp);
} else if (old_limit <= 0 && new_limit > 0) {
- _bev_group_unsuspend_reading(grp);
+ bev_group_unsuspend_reading_(grp);
}
UNLOCK_GROUP(grp);
new_limit = (grp->rate_limit.write_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
- _bev_group_suspend_writing(grp);
+ bev_group_suspend_writing_(grp);
} else if (old_limit <= 0 && new_limit > 0) {
- _bev_group_unsuspend_writing(grp);
+ bev_group_unsuspend_writing_(grp);
}
UNLOCK_GROUP(grp);
}
int
-_bufferevent_ratelim_init(struct bufferevent_private *bev)
+bufferevent_ratelim_init_(struct bufferevent_private *bev)
{
bev->rate_limiting = NULL;
bev->max_single_read = MAX_SINGLE_READ_DEFAULT;
};
#define be_socket_add(ev, t) \
- _bufferevent_add_event((ev), (t))
+ bufferevent_add_event_((ev), (t))
static void
bufferevent_socket_outbuf_cb(struct evbuffer *buf,
short what = BEV_EVENT_READING;
ev_ssize_t howmuch = -1, readmax=-1;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
goto done;
}
}
- readmax = _bufferevent_get_read_max(bufev_p);
+ readmax = bufferevent_get_read_max_(bufev_p);
if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
* uglifies this code. XXXX */
howmuch = readmax;
if (res <= 0)
goto error;
- _bufferevent_decrement_read_buckets(bufev_p, res);
+ bufferevent_decrement_read_buckets_(bufev_p, res);
/* Invoke the user callback - must always be called last */
if (evbuffer_get_length(input) >= bufev->wm_read.low)
- _bufferevent_run_readcb(bufev);
+ bufferevent_run_readcb_(bufev);
goto done;
error:
bufferevent_disable(bufev, EV_READ);
- _bufferevent_run_eventcb(bufev, what);
+ bufferevent_run_eventcb_(bufev, what);
done:
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
static void
int connected = 0;
ev_ssize_t atmost = -1;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
if (c < 0) {
event_del(&bufev->ev_write);
event_del(&bufev->ev_read);
- _bufferevent_run_eventcb(bufev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bufev, BEV_EVENT_ERROR);
goto done;
} else {
connected = 1;
if (BEV_IS_ASYNC(bufev)) {
event_del(&bufev->ev_write);
bufferevent_async_set_connected(bufev);
- _bufferevent_run_eventcb(bufev,
+ bufferevent_run_eventcb_(bufev,
BEV_EVENT_CONNECTED);
goto done;
}
#endif
- _bufferevent_run_eventcb(bufev,
+ bufferevent_run_eventcb_(bufev,
BEV_EVENT_CONNECTED);
if (!(bufev->enabled & EV_WRITE) ||
bufev_p->write_suspended) {
}
}
- atmost = _bufferevent_get_write_max(bufev_p);
+ atmost = bufferevent_get_write_max_(bufev_p);
if (bufev_p->write_suspended)
goto done;
if (res <= 0)
goto error;
- _bufferevent_decrement_write_buckets(bufev_p, res);
+ bufferevent_decrement_write_buckets_(bufev_p, res);
}
if (evbuffer_get_length(bufev->output) == 0) {
*/
if ((res || !connected) &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
- _bufferevent_run_writecb(bufev);
+ bufferevent_run_writecb_(bufev);
}
goto done;
error:
bufferevent_disable(bufev, EV_WRITE);
- _bufferevent_run_eventcb(bufev, what);
+ bufferevent_run_eventcb_(bufev, what);
done:
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
struct bufferevent *
int result=-1;
int ownfd = 0;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
if (!bufev_p)
goto done;
goto done;
freesock:
- _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
if (ownfd)
evutil_closesocket(fd);
/* do something about the error? */
done:
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
return result;
}
if (result != 0) {
bev_p->dns_error = result;
- _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
+ bufferevent_decref_and_unlock_(bev);
if (ai)
evutil_freeaddrinfo(ai);
return;
/* XXX use this return value */
r = bufferevent_socket_connect(bev, ai->ai_addr, (int)ai->ai_addrlen);
(void)r;
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
evutil_freeaddrinfo(ai);
}
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
-#ifndef _SYS_QUEUE_H_
-#define _SYS_QUEUE_H_
+#ifndef SYS_QUEUE_H__
+#define SYS_QUEUE_H__
/*
* This file defines five types of data structures: singly-linked lists,
(elm2)->field.cqe_prev->field.cqe_next = (elm2); \
} while (0)
-#endif /* !_SYS_QUEUE_H_ */
+#endif /* !SYS_QUEUE_H__ */
} while (0)
/** Increase the reference count of buf by one. */
-void _evbuffer_incref(struct evbuffer *buf);
+void evbuffer_incref_(struct evbuffer *buf);
/** Increase the reference count of buf by one and acquire the lock. */
-void _evbuffer_incref_and_lock(struct evbuffer *buf);
+void evbuffer_incref_and_lock_(struct evbuffer *buf);
/** Pin a single buffer chain using a given flag. A pinned chunk may not be
* moved or freed until it is unpinned. */
-void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag);
+void evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag);
/** Unpin a single buffer chain using a given flag. */
-void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag);
+void evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag);
/** As evbuffer_free, but requires that we hold a lock on the buffer, and
* releases the lock before freeing it and the buffer. */
-void _evbuffer_decref_and_unlock(struct evbuffer *buffer);
+void evbuffer_decref_and_unlock_(struct evbuffer *buffer);
/** As evbuffer_expand, but does not guarantee that the newly allocated memory
* is contiguous. Instead, it may be split across two or more chunks. */
-int _evbuffer_expand_fast(struct evbuffer *, size_t, int);
+int evbuffer_expand_fast_(struct evbuffer *, size_t, int);
/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
* hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
 * Sets up the iovecs in 'vecs' to point to the free memory and its
 * extent, and *chainp to point to the first chain that we'll try to read into.
* Returns the number of vecs used.
*/
-int _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
+int evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp,
int exact);
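
A sketch of how a read path combines the last two helpers (mirroring the
evbuffer_read hunk earlier in this patch): expand first, map the free space
into iovecs, read into them, then record what actually arrived:

	struct evbuffer_iovec vecs[2];
	struct evbuffer_chain **chainp;
	int nvecs;

	if (evbuffer_expand_fast_(buf, howmuch, 2) < 0)
		return -1;
	nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2, &chainp, 0);
	/* ... readv()/WSARecv() into vecs[0..nvecs-1], then commit however
	 * many bytes were actually read ... */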
static int strtoint(const char *const str);
#ifdef EVENT__DISABLE_THREAD_SUPPORT
-#define EVDNS_LOCK(base) _EVUTIL_NIL_STMT
-#define EVDNS_UNLOCK(base) _EVUTIL_NIL_STMT
-#define ASSERT_LOCKED(base) _EVUTIL_NIL_STMT
+#define EVDNS_LOCK(base) EVUTIL_NIL_STMT_
+#define EVDNS_UNLOCK(base) EVUTIL_NIL_STMT_
+#define ASSERT_LOCKED(base) EVUTIL_NIL_STMT_
#else
#define EVDNS_LOCK(base) \
EVLOCK_LOCK((base)->lock, 0)
#define EVDNS_LOG_CHECK
#endif
-static void _evdns_log(int warn, const char *fmt, ...) EVDNS_LOG_CHECK;
+static void evdns_log_(int warn, const char *fmt, ...) EVDNS_LOG_CHECK;
static void
-_evdns_log(int warn, const char *fmt, ...)
+evdns_log_(int warn, const char *fmt, ...)
{
va_list args;
char buf[512];
}
-#define log _evdns_log
+#define log evdns_log_
/* This walks the list of inflight requests to find the */
/* one with a matching transaction id. Returns NULL on */
int name_end = -1;
int j = *idx;
int ptr_count = 0;
-#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&_t32, packet + j, 4); j += 4; x = ntohl(_t32); } while (0)
-#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&_t, packet + j, 2); j += 2; x = ntohs(_t); } while (0)
+#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0)
+#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0)
#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0)
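/* Illustration (editorial, not part of this patch): with the renamed
 * temporaries, decoding the fixed-size DNS header fields looks like
 * this; 'j' is the running offset into 'packet' and t_ is the scratch
 * word declared by the caller. */
	u16 trans_id, flags, questions;
	GET16(trans_id);	/* bytes 0-1: transaction id */
	GET16(flags);		/* bytes 2-3: QR/opcode/RCODE flags */
	GET16(questions);	/* bytes 4-5: question count */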
char *cp = name_out;
static int
reply_parse(struct evdns_base *base, u8 *packet, int length) {
int j = 0, k = 0; /* index into packet */
- u16 _t; /* used by the macros */
- u32 _t32; /* used by the macros */
+ u16 t_; /* used by the macros */
+ u32 t32_; /* used by the macros */
char tmp_name[256], cmp_name[256]; /* used by the macros */
int name_matches = 0;
request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, ev_socklen_t addrlen)
{
int j = 0; /* index into packet */
- u16 _t; /* used by the macros */
+ u16 t_; /* used by the macros */
char tmp_name[256]; /* used by the macros */
int i;
struct dnslabel_table *table) {
const char *end = name + name_len;
int ref = 0;
- u16 _t;
+ u16 t_;
#define APPEND16(x) do { \
if (j + 2 > (off_t)buf_len) \
goto overflow; \
- _t = htons(x); \
- memcpy(buf + j, &_t, 2); \
+ t_ = htons(x); \
+ memcpy(buf + j, &t_, 2); \
j += 2; \
} while (0)
#define APPEND32(x) do { \
if (j + 4 > (off_t)buf_len) \
goto overflow; \
- _t32 = htonl(x); \
- memcpy(buf + j, &_t32, 4); \
+ t32_ = htonl(x); \
+ memcpy(buf + j, &t32_, 4); \
j += 4; \
} while (0)
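/* Illustration (editorial, not part of this patch): the encoding side
 * mirrors the GET* macros above; each APPEND16 bounds-checks against
 * buf_len, converts through t_ with htons(), and advances j. */
	APPEND16(type);		/* e.g. TYPE_A */
	APPEND16(class);	/* e.g. CLASS_INET */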
const u16 trans_id, const u16 type, const u16 class,
u8 *const buf, size_t buf_len) {
off_t j = 0; /* current offset into buf */
- u16 _t; /* used by the macros */
+ u16 t_; /* used by the macros */
APPEND16(trans_id);
APPEND16(0x0100); /* standard query, recursion needed */
unsigned char buf[1500];
size_t buf_len = sizeof(buf);
off_t j = 0, r;
- u16 _t;
- u32 _t32;
+ u16 t_;
+ u32 t32_;
int i;
u16 flags;
struct dnslabel_table table;
if (r < 0)
goto overflow;
j = r;
- _t = htons( (short) (j-name_start) );
- memcpy(buf+len_idx, &_t, 2);
+ t_ = htons( (short) (j-name_start) );
+ memcpy(buf+len_idx, &t_, 2);
} else {
APPEND16(item->datalen);
if (j+item->datalen > (off_t)buf_len)
}
static int
-_evdns_nameserver_add_impl(struct evdns_base *base, const struct sockaddr *address, int addrlen) {
+evdns_nameserver_add_impl_(struct evdns_base *base, const struct sockaddr *address, int addrlen) {
/* first check to see if we already have this nameserver */
const struct nameserver *server = base->server_head, *const started_at = base->server_head;
sin.sin_port = htons(53);
sin.sin_family = AF_INET;
EVDNS_LOCK(base);
- res = _evdns_nameserver_add_impl(base, (struct sockaddr*)&sin, sizeof(sin));
+ res = evdns_nameserver_add_impl_(base, (struct sockaddr*)&sin, sizeof(sin));
EVDNS_UNLOCK(base);
return res;
}
sockaddr_setport(sa, 53);
EVDNS_LOCK(base);
- res = _evdns_nameserver_add_impl(base, sa, len);
+ res = evdns_nameserver_add_impl_(base, sa, len);
EVDNS_UNLOCK(base);
return res;
}
int res;
EVUTIL_ASSERT(base);
EVDNS_LOCK(base);
- res = _evdns_nameserver_add_impl(base, sa, len);
+ res = evdns_nameserver_add_impl_(base, sa, len);
EVDNS_UNLOCK(base);
return res;
}
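/* Usage sketch (editorial, not part of this patch): these internal
 * entry points back the public configuration calls, reached roughly as
 * follows (error handling omitted; the address is illustrative): */
	struct event_base *base = event_base_new();
	struct evdns_base *dns = evdns_base_new(base, 0);
	evdns_base_nameserver_ip_add(dns, "10.0.0.1");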
/* map union members back */
/* mutually exclusive */
-#define ev_signal_next _ev.ev_signal.ev_signal_next
-#define ev_io_next _ev.ev_io.ev_io_next
-#define ev_io_timeout _ev.ev_io.ev_timeout
+#define ev_signal_next ev_.ev_signal.ev_signal_next
+#define ev_io_next ev_.ev_io.ev_io_next
+#define ev_io_timeout ev_.ev_io.ev_timeout
/* used only by signals */
-#define ev_ncalls _ev.ev_signal.ev_ncalls
-#define ev_pncalls _ev.ev_signal.ev_pncalls
+#define ev_ncalls ev_.ev_signal.ev_ncalls
+#define ev_pncalls ev_.ev_signal.ev_pncalls
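/* Expansion sketch (editorial, not part of this patch): the accessor
 * macros keep call sites unchanged across the union rename, e.g.: */
	short n = ev->ev_ncalls;	/* expands to ev->ev_.ev_signal.ev_ncalls */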
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
-extern int _event_debug_mode_on;
-#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
+extern int event_debug_mode_on_;
+#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
#define N_ACTIVE_CALLBACKS(base) \
((base)->event_count_active + (base)->defer_queue.active_count)
-int _evsig_set_handler(struct event_base *base, int evsignal,
+int evsig_set_handler_(struct event_base *base, int evsignal,
void (*fn)(int));
-int _evsig_restore_handler(struct event_base *base, int evsignal);
+int evsig_restore_handler_(struct event_base *base, int evsignal);
void event_active_nolock(struct event *ev, int res, short count);
return a->ptr == b->ptr;
}
-int _event_debug_mode_on = 0;
+int event_debug_mode_on_ = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
-static void *_event_debug_map_lock = NULL;
+static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
HT_INITIALIZER();
eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* Macro: record that ev is now setup (that is, ready for an add) */
-#define _event_debug_note_setup(ev) do { \
- if (_event_debug_mode_on) { \
+#define event_debug_note_setup_(ev) do { \
+ if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
- EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 0; \
dent->added = 0; \
HT_INSERT(event_debug_map, &global_debug_map, dent); \
} \
- EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: record that ev is no longer setup */
-#define _event_debug_note_teardown(ev) do { \
- if (_event_debug_mode_on) { \
+#define event_debug_note_teardown_(ev) do { \
+ if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
- EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
if (dent) \
mm_free(dent); \
- EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: record that ev is now added */
-#define _event_debug_note_add(ev) do { \
- if (_event_debug_mode_on) { \
+#define event_debug_note_add_(ev) do { \
+ if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
- EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 1; \
} else { \
- event_errx(_EVENT_ERR_ABORT, \
+ event_errx(EVENT_ERR_ABORT_, \
"%s: noting an add on a non-setup event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
- EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: record that ev is no longer added */
-#define _event_debug_note_del(ev) do { \
- if (_event_debug_mode_on) { \
+#define event_debug_note_del_(ev) do { \
+ if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
- EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 0; \
} else { \
- event_errx(_EVENT_ERR_ABORT, \
+ event_errx(EVENT_ERR_ABORT_, \
"%s: noting a del on a non-setup event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
- EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
event_debug_mode_too_late = 1; \
} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
-#define _event_debug_assert_is_setup(ev) do { \
- if (_event_debug_mode_on) { \
+#define event_debug_assert_is_setup_(ev) do { \
+ if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
- EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (!dent) { \
- event_errx(_EVENT_ERR_ABORT, \
+ event_errx(EVENT_ERR_ABORT_, \
"%s called on a non-initialized event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
- EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
* up again) */
-#define _event_debug_assert_not_added(ev) do { \
- if (_event_debug_mode_on) { \
+#define event_debug_assert_not_added_(ev) do { \
+ if (event_debug_mode_on_) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
- EVLOCK_LOCK(_event_debug_map_lock, 0); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent && dent->added) { \
- event_errx(_EVENT_ERR_ABORT, \
+ event_errx(EVENT_ERR_ABORT_, \
"%s called on an already added event %p" \
" (events: 0x%x, fd: %d, flags: 0x%x)", \
__func__, (ev), (ev)->ev_events, \
(ev)->ev_fd, (ev)->ev_flags); \
} \
- EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
} \
} while (0)
#else
-#define _event_debug_note_setup(ev) \
+#define event_debug_note_setup_(ev) \
((void)0)
-#define _event_debug_note_teardown(ev) \
+#define event_debug_note_teardown_(ev) \
((void)0)
-#define _event_debug_note_add(ev) \
+#define event_debug_note_add_(ev) \
((void)0)
-#define _event_debug_note_del(ev) \
+#define event_debug_note_del_(ev) \
((void)0)
-#define _event_debug_assert_is_setup(ev) \
+#define event_debug_assert_is_setup_(ev) \
((void)0)
-#define _event_debug_assert_not_added(ev) \
+#define event_debug_assert_not_added_(ev) \
((void)0)
#endif
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
- if (_event_debug_mode_on)
+ if (event_debug_mode_on_)
event_errx(1, "%s was called twice!", __func__);
if (event_debug_mode_too_late)
event_errx(1, "%s must be called *before* creating any events "
"or event_bases",__func__);
- _event_debug_mode_on = 1;
+ event_debug_mode_on_ = 1;
HT_INIT(event_debug_map, &global_debug_map);
#endif
{
struct event_debug_entry **ent, *victim;
- EVLOCK_LOCK(_event_debug_map_lock, 0);
+ EVLOCK_LOCK(event_debug_map_lock_, 0);
for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
victim = *ent;
ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
mm_free(victim);
}
HT_CLEAR(event_debug_map, &global_debug_map);
- EVLOCK_UNLOCK(_event_debug_map_lock , 0);
+	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
#endif
if (!base)
base = current_base;
- _event_debug_assert_not_added(ev);
+ event_debug_assert_not_added_(ev);
ev->ev_base = base;
ev->ev_pri = base->nactivequeues / 2;
}
- _event_debug_note_setup(ev);
+ event_debug_note_setup_(ev);
return 0;
}
if (ev->ev_flags != EVLIST_INIT)
return (-1);
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
ev->ev_base = base;
ev->ev_pri = base->nactivequeues/2;
void
event_free(struct event *ev)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
/* make sure that this event won't be coming back to haunt us. */
event_del(ev);
- _event_debug_note_teardown(ev);
+ event_debug_note_teardown_(ev);
mm_free(ev);
}
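/* Lifecycle sketch (editorial, not part of this patch): with
 * event_enable_debug_mode() active, each transition below is tracked by
 * the note/assert macros; 'base', 'fd' and 'cb' are assumed here. */
	struct event *ev = event_new(base, fd, EV_READ, cb, NULL);
	event_add(ev, NULL);	/* event_debug_note_add_ fires */
	event_del(ev);		/* event_debug_note_del_ fires */
	event_free(ev);		/* event_debug_note_teardown_ fires */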
void
event_debug_unassign(struct event *ev)
{
- _event_debug_assert_not_added(ev);
- _event_debug_note_teardown(ev);
+ event_debug_assert_not_added_(ev);
+ event_debug_note_teardown_(ev);
ev->ev_flags &= ~EVLIST_INIT;
}
int
event_priority_set(struct event *ev, int pri)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
if (ev->ev_flags & EVLIST_ACTIVE)
return (-1);
{
int flags = 0;
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
if (ev->ev_flags & EVLIST_INSERTED)
flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
void
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
{
- _event_debug_assert_is_setup(event);
+ event_debug_assert_is_setup_(event);
if (base_out)
*base_out = event->ev_base;
evutil_socket_t
event_get_fd(const struct event *ev)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
return ev->ev_fd;
}
struct event_base *
event_get_base(const struct event *ev)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
return ev->ev_base;
}
short
event_get_events(const struct event *ev)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
return ev->ev_events;
}
event_callback_fn
event_get_callback(const struct event *ev)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
return ev->ev_callback;
}
void *
event_get_callback_arg(const struct event *ev)
{
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
return ev->ev_arg;
}
int notify = 0;
EVENT_BASE_ASSERT_LOCKED(base);
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
event_debug((
"event_add: event: %p (fd %d), %s%s%scall %p",
if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
evthread_notify_base(base);
- _event_debug_note_add(ev);
+ event_debug_note_add_(ev);
return (res);
}
if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
evthread_notify_base(base);
- _event_debug_note_del(ev);
+ event_debug_note_del_(ev);
return (res);
}
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
- _event_debug_assert_is_setup(ev);
+ event_debug_assert_is_setup_(ev);
event_active_nolock(ev, res, ncalls);
}
#ifndef EVENT__DISABLE_MM_REPLACEMENT
-static void *(*_mm_malloc_fn)(size_t sz) = NULL;
-static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
-static void (*_mm_free_fn)(void *p) = NULL;
+static void *(*mm_malloc_fn_)(size_t sz) = NULL;
+static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
+static void (*mm_free_fn_)(void *p) = NULL;
void *
event_mm_malloc_(size_t sz)
if (sz == 0)
return NULL;
- if (_mm_malloc_fn)
- return _mm_malloc_fn(sz);
+ if (mm_malloc_fn_)
+ return mm_malloc_fn_(sz);
else
return malloc(sz);
}
if (count == 0 || size == 0)
return NULL;
- if (_mm_malloc_fn) {
+ if (mm_malloc_fn_) {
size_t sz = count * size;
void *p = NULL;
if (count > EV_SIZE_MAX / size)
goto error;
- p = _mm_malloc_fn(sz);
+ p = mm_malloc_fn_(sz);
if (p)
return memset(p, 0, sz);
} else {
return NULL;
}
- if (_mm_malloc_fn) {
+ if (mm_malloc_fn_) {
size_t ln = strlen(str);
void *p = NULL;
if (ln == EV_SIZE_MAX)
goto error;
- p = _mm_malloc_fn(ln+1);
+ p = mm_malloc_fn_(ln+1);
if (p)
return memcpy(p, str, ln+1);
} else
void *
event_mm_realloc_(void *ptr, size_t sz)
{
- if (_mm_realloc_fn)
- return _mm_realloc_fn(ptr, sz);
+ if (mm_realloc_fn_)
+ return mm_realloc_fn_(ptr, sz);
else
return realloc(ptr, sz);
}
void
event_mm_free_(void *ptr)
{
- if (_mm_free_fn)
- _mm_free_fn(ptr);
+ if (mm_free_fn_)
+ mm_free_fn_(ptr);
else
free(ptr);
}
void *(*realloc_fn)(void *ptr, size_t sz),
void (*free_fn)(void *ptr))
{
- _mm_malloc_fn = malloc_fn;
- _mm_realloc_fn = realloc_fn;
- _mm_free_fn = free_fn;
+ mm_malloc_fn_ = malloc_fn;
+ mm_realloc_fn_ = realloc_fn;
+ mm_free_fn_ = free_fn;
}
#endif
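/* Usage sketch (editorial, not part of this patch): custom allocators
 * must be installed before any other libevent call so that every
 * allocation goes through them; the pass-through wrappers here are
 * illustrative. */
static void *my_malloc(size_t sz) { return malloc(sz); }
static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void my_free(void *p) { free(p); }
	/* early in main(): */
	event_set_mem_functions(my_malloc, my_realloc, my_free);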
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
- EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
+ EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
if (evsig_global_setup_locks_(enable_locks) < 0)
return -1;
}
static void
-_event_iocp_port_unlock_and_free(struct event_iocp_port *port)
+event_iocp_port_unlock_and_free_(struct event_iocp_port *port)
{
DeleteCriticalSection(&port->lock);
CloseHandle(port->port);
n = port->n_live_threads;
LeaveCriticalSection(&port->lock);
if (n == 0) {
- _event_iocp_port_unlock_and_free(port);
+ event_iocp_port_unlock_and_free_(port);
return 0;
} else {
return -1;
struct evrpc_hook_ctx;
TAILQ_HEAD(evrpc_pause_list, evrpc_hook_ctx);
-struct _evrpc_hooks {
+struct evrpc_hooks_ {
/* hooks for processing outbound and inbound rpcs */
struct evrpc_hook_list in_hooks;
struct evrpc_hook_list out_hooks;
#define paused_requests common.pause_requests
struct evrpc_base {
- struct _evrpc_hooks common;
+ struct evrpc_hooks_ common;
/* the HTTP server under which we register our RPC calls */
struct evhttp* http_server;
/* A pool for holding evhttp_connection objects */
struct evrpc_pool {
- struct _evrpc_hooks common;
+ struct evrpc_hooks_ common;
struct event_base *base;
int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg)
{
- struct _evrpc_hooks *base = vbase;
+ struct evrpc_hooks_ *base = vbase;
struct evrpc_hook_list *head = NULL;
struct evrpc_hook *hook = NULL;
switch (hook_type) {
int
evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
{
- struct _evrpc_hooks *base = vbase;
+ struct evrpc_hooks_ *base = vbase;
struct evrpc_hook_list *head = NULL;
switch (hook_type) {
case EVRPC_INPUT:
evrpc_pause_request(void *vbase, void *ctx,
void (*cb)(void *, enum EVRPC_HOOK_RESULT))
{
- struct _evrpc_hooks *base = vbase;
+ struct evrpc_hooks_ *base = vbase;
struct evrpc_hook_ctx *pause = mm_malloc(sizeof(*pause));
if (pause == NULL)
return (-1);
int
evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res)
{
- struct _evrpc_hooks *base = vbase;
+ struct evrpc_hooks_ *base = vbase;
struct evrpc_pause_list *head = &base->pause_requests;
struct evrpc_hook_ctx *pause;
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
enabled. */
-extern struct evthread_lock_callbacks _evthread_lock_fns;
-extern struct evthread_condition_callbacks _evthread_cond_fns;
-extern unsigned long (*_evthread_id_fn)(void);
-extern int _evthread_lock_debugging_enabled;
+extern struct evthread_lock_callbacks evthread_lock_fns_;
+extern struct evthread_condition_callbacks evthread_cond_fns_;
+extern unsigned long (*evthread_id_fn_)(void);
+extern int evthread_lock_debugging_enabled_;
/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
- (_evthread_id_fn ? _evthread_id_fn() : 1)
+ (evthread_id_fn_ ? evthread_id_fn_() : 1)
/** Return true iff we're in the thread that is currently (or most recently)
* running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base) \
- (_evthread_id_fn == NULL || \
- (base)->th_owner_id == _evthread_id_fn())
+ (evthread_id_fn_ == NULL || \
+ (base)->th_owner_id == evthread_id_fn_())
/** Return true iff we need to notify the base's main thread about changes to
* its state, because it's currently running the main loop in another
* thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
- (_evthread_id_fn != NULL && \
+ (evthread_id_fn_ != NULL && \
(base)->running_loop && \
- (base)->th_owner_id != _evthread_id_fn())
+ (base)->th_owner_id != evthread_id_fn_())
/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
- ((lockvar) = _evthread_lock_fns.alloc ? \
- _evthread_lock_fns.alloc(locktype) : NULL)
+ ((lockvar) = evthread_lock_fns_.alloc ? \
+ evthread_lock_fns_.alloc(locktype) : NULL)
/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
do { \
void *_lock_tmp_ = (lockvar); \
- if (_lock_tmp_ && _evthread_lock_fns.free) \
- _evthread_lock_fns.free(_lock_tmp_, (locktype)); \
+ if (_lock_tmp_ && evthread_lock_fns_.free) \
+ evthread_lock_fns_.free(_lock_tmp_, (locktype)); \
} while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
do { \
if (lockvar) \
- _evthread_lock_fns.lock(mode, lockvar); \
+ evthread_lock_fns_.lock(mode, lockvar); \
} while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
do { \
if (lockvar) \
- _evthread_lock_fns.unlock(mode, lockvar); \
+ evthread_lock_fns_.unlock(mode, lockvar); \
} while (0)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
-#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
+#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
void *tmp = lockvar1; \
* locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
do { \
- if ((lock) && _evthread_lock_debugging_enabled) { \
- EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
+ if ((lock) && evthread_lock_debugging_enabled_) { \
+ EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
} \
} while (0)
static inline int
EVLOCK_TRY_LOCK(void *lock)
{
- if (lock && _evthread_lock_fns.lock) {
- int r = _evthread_lock_fns.lock(EVTHREAD_TRY, lock);
+ if (lock && evthread_lock_fns_.lock) {
+ int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
return !r;
} else {
/* Locking is disabled either globally or for this thing;
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
do { \
- (condvar) = _evthread_cond_fns.alloc_condition ? \
- _evthread_cond_fns.alloc_condition(0) : NULL; \
+ (condvar) = evthread_cond_fns_.alloc_condition ? \
+ evthread_cond_fns_.alloc_condition(0) : NULL; \
} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
do { \
if (cond) \
- _evthread_cond_fns.free_condition((cond)); \
+ evthread_cond_fns_.free_condition((cond)); \
} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
- ( (cond) ? _evthread_cond_fns.signal_condition((cond), 0) : 0 )
+ ( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
- ( (cond) ? _evthread_cond_fns.signal_condition((cond), 1) : 0 )
+ ( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
* holding 'lock'. The lock will be released until the condition is
* signalled, at which point it will be acquired again. Returns 0 for
* success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
- ( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), NULL) : 0 )
+ ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
* on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
- ( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )
+ ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED() \
- (_evthread_lock_fns.lock != NULL)
+ (evthread_lock_fns_.lock != NULL)
#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)
-unsigned long _evthreadimpl_get_id(void);
-int _evthreadimpl_is_lock_debugging_enabled(void);
-void *_evthreadimpl_lock_alloc(unsigned locktype);
-void _evthreadimpl_lock_free(void *lock, unsigned locktype);
-int _evthreadimpl_lock_lock(unsigned mode, void *lock);
-int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
-void *_evthreadimpl_cond_alloc(unsigned condtype);
-void _evthreadimpl_cond_free(void *cond);
-int _evthreadimpl_cond_signal(void *cond, int broadcast);
-int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);
-int _evthreadimpl_locking_enabled(void);
-
-#define EVTHREAD_GET_ID() _evthreadimpl_get_id()
+unsigned long evthreadimpl_get_id_(void);
+int evthreadimpl_is_lock_debugging_enabled_(void);
+void *evthreadimpl_lock_alloc_(unsigned locktype);
+void evthreadimpl_lock_free_(void *lock, unsigned locktype);
+int evthreadimpl_lock_lock_(unsigned mode, void *lock);
+int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
+void *evthreadimpl_cond_alloc_(unsigned condtype);
+void evthreadimpl_cond_free_(void *cond);
+int evthreadimpl_cond_signal_(void *cond, int broadcast);
+int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
+int evthreadimpl_locking_enabled_(void);
+
+#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
#define EVBASE_IN_THREAD(base) \
- ((base)->th_owner_id == _evthreadimpl_get_id())
+ ((base)->th_owner_id == evthreadimpl_get_id_())
#define EVBASE_NEED_NOTIFY(base) \
((base)->running_loop && \
- ((base)->th_owner_id != _evthreadimpl_get_id()))
+ ((base)->th_owner_id != evthreadimpl_get_id_()))
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
- ((lockvar) = _evthreadimpl_lock_alloc(locktype))
+ ((lockvar) = evthreadimpl_lock_alloc_(locktype))
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
do { \
void *_lock_tmp_ = (lockvar); \
if (_lock_tmp_) \
- _evthreadimpl_lock_free(_lock_tmp_, (locktype)); \
+ evthreadimpl_lock_free_(_lock_tmp_, (locktype)); \
} while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
do { \
if (lockvar) \
- _evthreadimpl_lock_lock(mode, lockvar); \
+ evthreadimpl_lock_lock_(mode, lockvar); \
} while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
do { \
if (lockvar) \
- _evthreadimpl_lock_unlock(mode, lockvar); \
+ evthreadimpl_lock_unlock_(mode, lockvar); \
} while (0)
/** Lock an event_base, if it is set up for locking. Acquires the lock
* locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
do { \
- if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
- EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
+ if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
+ EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
} \
} while (0)
EVLOCK_TRY_LOCK(void *lock)
{
if (lock) {
- int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock);
+ int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
return !r;
} else {
/* Locking is disabled either globally or for this thing;
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
do { \
- (condvar) = _evthreadimpl_cond_alloc(0); \
+ (condvar) = evthreadimpl_cond_alloc_(0); \
} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
do { \
if (cond) \
- _evthreadimpl_cond_free((cond)); \
+ evthreadimpl_cond_free_((cond)); \
} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
- ( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )
+ ( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
- ( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )
+ ( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
* holding 'lock'. The lock will be released until the condition is
* signalled, at which point it will be acquired again. Returns 0 for
* success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
- ( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )
+ ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
* on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
- ( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )
+ ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )
#define EVTHREAD_LOCKING_ENABLED() \
- (_evthreadimpl_locking_enabled())
+ (evthreadimpl_locking_enabled_())
#else /* EVENT__DISABLE_THREAD_SUPPORT */
#define EVTHREAD_GET_ID() 1
-#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
-#define EVTHREAD_FREE_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
-#define EVLOCK_LOCK(lockvar, mode) _EVUTIL_NIL_STMT
-#define EVLOCK_UNLOCK(lockvar, mode) _EVUTIL_NIL_STMT
-#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
-#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
+#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
+#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
+#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
+#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVBASE_IN_THREAD(base) 1
#define EVBASE_NEED_NOTIFY(base) 0
-#define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
-#define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
-#define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT
+#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
+#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
+#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_
#define EVLOCK_TRY_LOCK(lock) 1
-#define EVTHREAD_ALLOC_COND(condvar) _EVUTIL_NIL_STMT
-#define EVTHREAD_FREE_COND(cond) _EVUTIL_NIL_STMT
-#define EVTHREAD_COND_SIGNAL(cond) _EVUTIL_NIL_STMT
-#define EVTHREAD_COND_BROADCAST(cond) _EVUTIL_NIL_STMT
-#define EVTHREAD_COND_WAIT(cond, lock) _EVUTIL_NIL_STMT
-#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) _EVUTIL_NIL_STMT
+#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
+#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_
#define EVTHREAD_LOCKING_ENABLED() 0
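/* Shape of a typical wait (editorial, not part of this patch): the
 * predicate is re-checked in a loop because condition waits can wake
 * spuriously, and the lock must be held around the wait; 'cond', 'lock'
 * and 'ready' are assumed names. */
	EVLOCK_LOCK(lock, 0);
	while (!ready)
		EVTHREAD_COND_WAIT(cond, lock);
	EVLOCK_UNLOCK(lock, 0);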
/* This code is shared between both lock impls */
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
-#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
+#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
void *tmp = lockvar1; \
do { \
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
- _EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
+ EVLOCK_SORTLOCKS_(_lock1_tmplock,_lock2_tmplock); \
EVLOCK_LOCK(_lock1_tmplock,mode1); \
if (_lock2_tmplock != _lock1_tmplock) \
EVLOCK_LOCK(_lock2_tmplock,mode2); \
do { \
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
- _EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
+ EVLOCK_SORTLOCKS_(_lock1_tmplock,_lock2_tmplock); \
if (_lock2_tmplock != _lock1_tmplock) \
EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
} while (0)
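/* Why the pointer sort matters (editorial, not part of this patch):
 * without one global acquisition order, two threads taking the same
 * pair of locks in opposite argument order could deadlock.  After the
 * sort, both calls below acquire 'a' then 'b' (assuming a < b): */
	EVLOCK_LOCK2(a, b, 0, 0);	/* thread 1 */
	EVLOCK_LOCK2(b, a, 0, 0);	/* thread 2: same order internally */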
-int _evthread_is_debug_lock_held(void *lock);
-void *_evthread_debug_get_real_lock(void *lock);
+int evthread_is_debug_lock_held_(void *lock);
+void *evthread_debug_get_real_lock_(void *lock);
void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
int enable_locks);
#endif
/* globals */
-GLOBAL int _evthread_lock_debugging_enabled = 0;
-GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
+GLOBAL int evthread_lock_debugging_enabled_ = 0;
+GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
0, 0, NULL, NULL, NULL, NULL
};
-GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
-GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
+GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
+GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
0, NULL, NULL, NULL, NULL
};
/* Used for debugging */
-static struct evthread_lock_callbacks _original_lock_fns = {
+static struct evthread_lock_callbacks original_lock_fns_ = {
0, 0, NULL, NULL, NULL, NULL
};
-static struct evthread_condition_callbacks _original_cond_fns = {
+static struct evthread_condition_callbacks original_cond_fns_ = {
0, NULL, NULL, NULL, NULL
};
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
- _evthread_id_fn = id_fn;
+ evthread_id_fn_ = id_fn;
}
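/* Usage sketch (editorial, not part of this patch): a pthreads-based id
 * callback, equivalent to what evthread_use_pthreads() installs. */
static unsigned long
my_id_fn(void)
{
	return (unsigned long)pthread_self();
}
	/* during initialization: */
	evthread_set_id_callback(my_id_fn);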
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
struct evthread_lock_callbacks *target =
- _evthread_lock_debugging_enabled
- ? &_original_lock_fns : &_evthread_lock_fns;
+ evthread_lock_debugging_enabled_
+ ? &original_lock_fns_ : &evthread_lock_fns_;
if (!cbs) {
if (target->alloc)
event_warnx("Trying to disable lock functions after "
"they have been set up will probaby not work.");
- memset(target, 0, sizeof(_evthread_lock_fns));
+ memset(target, 0, sizeof(evthread_lock_fns_));
return 0;
}
if (target->alloc) {
return -1;
}
if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
- memcpy(target, cbs, sizeof(_evthread_lock_fns));
+ memcpy(target, cbs, sizeof(evthread_lock_fns_));
return event_global_setup_locks_(1);
} else {
return -1;
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
struct evthread_condition_callbacks *target =
- _evthread_lock_debugging_enabled
- ? &_original_cond_fns : &_evthread_cond_fns;
+ evthread_lock_debugging_enabled_
+ ? &original_cond_fns_ : &evthread_cond_fns_;
if (!cbs) {
if (target->alloc_condition)
event_warnx("Trying to disable condition functions "
"after they have been set up will probaby not "
"work.");
- memset(target, 0, sizeof(_evthread_cond_fns));
+ memset(target, 0, sizeof(evthread_cond_fns_));
return 0;
}
if (target->alloc_condition) {
}
if (cbs->alloc_condition && cbs->free_condition &&
cbs->signal_condition && cbs->wait_condition) {
- memcpy(target, cbs, sizeof(_evthread_cond_fns));
+ memcpy(target, cbs, sizeof(evthread_cond_fns_));
}
- if (_evthread_lock_debugging_enabled) {
- _evthread_cond_fns.alloc_condition = cbs->alloc_condition;
- _evthread_cond_fns.free_condition = cbs->free_condition;
- _evthread_cond_fns.signal_condition = cbs->signal_condition;
+ if (evthread_lock_debugging_enabled_) {
+ evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
+ evthread_cond_fns_.free_condition = cbs->free_condition;
+ evthread_cond_fns_.signal_condition = cbs->signal_condition;
}
return 0;
}
struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
if (!result)
return NULL;
- if (_original_lock_fns.alloc) {
- if (!(result->lock = _original_lock_fns.alloc(
+ if (original_lock_fns_.alloc) {
+ if (!(result->lock = original_lock_fns_.alloc(
locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
mm_free(result);
return NULL;
EVUTIL_ASSERT(lock->count == 0);
EVUTIL_ASSERT(locktype == lock->locktype);
EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
- if (_original_lock_fns.free) {
- _original_lock_fns.free(lock->lock,
+ if (original_lock_fns_.free) {
+ original_lock_fns_.free(lock->lock,
lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
}
lock->lock = NULL;
++lock->count;
if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
EVUTIL_ASSERT(lock->count == 1);
- if (_evthread_id_fn) {
+ if (evthread_id_fn_) {
unsigned long me;
- me = _evthread_id_fn();
+ me = evthread_id_fn_();
if (lock->count > 1)
EVUTIL_ASSERT(lock->held_by == me);
lock->held_by = me;
EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
else
EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
- if (_original_lock_fns.lock)
- res = _original_lock_fns.lock(mode, lock->lock);
+ if (original_lock_fns_.lock)
+ res = original_lock_fns_.lock(mode, lock->lock);
if (!res) {
evthread_debug_lock_mark_locked(mode, lock);
}
EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
else
EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
- if (_evthread_id_fn) {
+ if (evthread_id_fn_) {
unsigned long me;
- me = _evthread_id_fn();
+ me = evthread_id_fn_();
EVUTIL_ASSERT(lock->held_by == me);
if (lock->count == 1)
lock->held_by = 0;
struct debug_lock *lock = lock_;
int res = 0;
evthread_debug_lock_mark_unlocked(mode, lock);
- if (_original_lock_fns.unlock)
- res = _original_lock_fns.unlock(mode, lock->lock);
+ if (original_lock_fns_.unlock)
+ res = original_lock_fns_.unlock(mode, lock->lock);
return res;
}
EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
EVLOCK_ASSERT_LOCKED(_lock);
evthread_debug_lock_mark_unlocked(0, lock);
- r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
+ r = original_cond_fns_.wait_condition(_cond, lock->lock, tv);
evthread_debug_lock_mark_locked(0, lock);
return r;
}
debug_lock_lock,
debug_lock_unlock
};
- if (_evthread_lock_debugging_enabled)
+ if (evthread_lock_debugging_enabled_)
return;
- memcpy(&_original_lock_fns, &_evthread_lock_fns,
+ memcpy(&original_lock_fns_, &evthread_lock_fns_,
sizeof(struct evthread_lock_callbacks));
- memcpy(&_evthread_lock_fns, &cbs,
+ memcpy(&evthread_lock_fns_, &cbs,
sizeof(struct evthread_lock_callbacks));
- memcpy(&_original_cond_fns, &_evthread_cond_fns,
+ memcpy(&original_cond_fns_, &evthread_cond_fns_,
sizeof(struct evthread_condition_callbacks));
- _evthread_cond_fns.wait_condition = debug_cond_wait;
- _evthread_lock_debugging_enabled = 1;
+ evthread_cond_fns_.wait_condition = debug_cond_wait;
+ evthread_lock_debugging_enabled_ = 1;
/* XXX return value should get checked. */
event_global_setup_locks_(0);
}
int
-_evthread_is_debug_lock_held(void *lock_)
+evthread_is_debug_lock_held_(void *lock_)
{
struct debug_lock *lock = lock_;
if (! lock->count)
return 0;
- if (_evthread_id_fn) {
- unsigned long me = _evthread_id_fn();
+ if (evthread_id_fn_) {
+ unsigned long me = evthread_id_fn_();
if (lock->held_by != me)
return 0;
}
}
void *
-_evthread_debug_get_real_lock(void *lock_)
+evthread_debug_get_real_lock_(void *lock_)
{
struct debug_lock *lock = lock_;
return lock->lock;
3) we're turning on locking; debugging is not on.
4) we're turning on locking; debugging is on. */
- if (!enable_locks && _original_lock_fns.alloc == NULL) {
+ if (!enable_locks && original_lock_fns_.alloc == NULL) {
/* Case 1: allocate a debug lock. */
EVUTIL_ASSERT(lock_ == NULL);
return debug_lock_alloc(locktype);
- } else if (!enable_locks && _original_lock_fns.alloc != NULL) {
+ } else if (!enable_locks && original_lock_fns_.alloc != NULL) {
/* Case 2: wrap the lock in a debug lock. */
struct debug_lock *lock;
EVUTIL_ASSERT(lock_ != NULL);
if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
/* We can't wrap it: We need a recursive lock */
- _original_lock_fns.free(lock_, locktype);
+ original_lock_fns_.free(lock_, locktype);
return debug_lock_alloc(locktype);
}
lock = mm_malloc(sizeof(struct debug_lock));
if (!lock) {
- _original_lock_fns.free(lock_, locktype);
+ original_lock_fns_.free(lock_, locktype);
return NULL;
}
lock->lock = lock_;
lock->count = 0;
lock->held_by = 0;
return lock;
- } else if (enable_locks && ! _evthread_lock_debugging_enabled) {
+ } else if (enable_locks && ! evthread_lock_debugging_enabled_) {
/* Case 3: allocate a regular lock */
EVUTIL_ASSERT(lock_ == NULL);
- return _evthread_lock_fns.alloc(locktype);
+ return evthread_lock_fns_.alloc(locktype);
} else {
/* Case 4: Fill in a debug lock with a real lock */
struct debug_lock *lock = lock_;
EVUTIL_ASSERT(enable_locks &&
- _evthread_lock_debugging_enabled);
+ evthread_lock_debugging_enabled_);
EVUTIL_ASSERT(lock->locktype == locktype);
EVUTIL_ASSERT(lock->lock == NULL);
- lock->lock = _original_lock_fns.alloc(
+ lock->lock = original_lock_fns_.alloc(
locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
if (!lock->lock) {
lock->count = -200;
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
-_evthreadimpl_get_id()
+evthreadimpl_get_id_()
{
- return _evthread_id_fn ? _evthread_id_fn() : 1;
+ return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
-_evthreadimpl_lock_alloc(unsigned locktype)
+evthreadimpl_lock_alloc_(unsigned locktype)
{
- return _evthread_lock_fns.alloc ?
- _evthread_lock_fns.alloc(locktype) : NULL;
+ return evthread_lock_fns_.alloc ?
+ evthread_lock_fns_.alloc(locktype) : NULL;
}
void
-_evthreadimpl_lock_free(void *lock, unsigned locktype)
+evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
- if (_evthread_lock_fns.free)
- _evthread_lock_fns.free(lock, locktype);
+ if (evthread_lock_fns_.free)
+ evthread_lock_fns_.free(lock, locktype);
}
int
-_evthreadimpl_lock_lock(unsigned mode, void *lock)
+evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
- if (_evthread_lock_fns.lock)
- return _evthread_lock_fns.lock(mode, lock);
+ if (evthread_lock_fns_.lock)
+ return evthread_lock_fns_.lock(mode, lock);
else
return 0;
}
int
-_evthreadimpl_lock_unlock(unsigned mode, void *lock)
+evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
- if (_evthread_lock_fns.unlock)
- return _evthread_lock_fns.unlock(mode, lock);
+ if (evthread_lock_fns_.unlock)
+ return evthread_lock_fns_.unlock(mode, lock);
else
return 0;
}
void *
-_evthreadimpl_cond_alloc(unsigned condtype)
+evthreadimpl_cond_alloc_(unsigned condtype)
{
- return _evthread_cond_fns.alloc_condition ?
- _evthread_cond_fns.alloc_condition(condtype) : NULL;
+ return evthread_cond_fns_.alloc_condition ?
+ evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
-_evthreadimpl_cond_free(void *cond)
+evthreadimpl_cond_free_(void *cond)
{
- if (_evthread_cond_fns.free_condition)
- _evthread_cond_fns.free_condition(cond);
+ if (evthread_cond_fns_.free_condition)
+ evthread_cond_fns_.free_condition(cond);
}
int
-_evthreadimpl_cond_signal(void *cond, int broadcast)
+evthreadimpl_cond_signal_(void *cond, int broadcast)
{
- if (_evthread_cond_fns.signal_condition)
- return _evthread_cond_fns.signal_condition(cond, broadcast);
+ if (evthread_cond_fns_.signal_condition)
+ return evthread_cond_fns_.signal_condition(cond, broadcast);
else
return 0;
}
int
-_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
+evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
- if (_evthread_cond_fns.wait_condition)
- return _evthread_cond_fns.wait_condition(cond, lock, tv);
+ if (evthread_cond_fns_.wait_condition)
+ return evthread_cond_fns_.wait_condition(cond, lock, tv);
else
return 0;
}
int
-_evthreadimpl_is_lock_debugging_enabled(void)
+evthreadimpl_is_lock_debugging_enabled_(void)
{
- return _evthread_lock_debugging_enabled;
+ return evthread_lock_debugging_enabled_;
}
int
-_evthreadimpl_locking_enabled(void)
+evthreadimpl_locking_enabled_(void)
{
- return _evthread_lock_fns.lock != NULL;
+ return evthread_lock_fns_.lock != NULL;
}
#endif
}
long
-_evutil_weakrand(void)
+evutil_weakrand_(void)
{
#ifdef _WIN32
return rand();
#define ssize_t _EVENT_SSIZE_t
#endif
#define ARC4RANDOM_EXPORT static
-#define _ARC4_LOCK() EVLOCK_LOCK(arc4rand_lock, 0)
-#define _ARC4_UNLOCK() EVLOCK_UNLOCK(arc4rand_lock, 0)
+#define ARC4_LOCK_() EVLOCK_LOCK(arc4rand_lock, 0)
+#define ARC4_UNLOCK_() EVLOCK_UNLOCK(arc4rand_lock, 0)
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *arc4rand_lock;
#endif
{
int val;
- _ARC4_LOCK();
+ ARC4_LOCK_();
if (!arc4_seeded_ok)
arc4_stir();
val = arc4_seeded_ok ? 0 : -1;
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
return val;
}
struct evhttp_request;
/* Indicates an unknown request method. */
-#define _EVHTTP_REQ_UNKNOWN (1<<15)
+#define EVHTTP_REQ_UNKNOWN_ (1<<15)
enum evhttp_connection_state {
EVCON_DISCONNECTED, /**< not currently connected not trying either*/
return (-1);
method_len = (uri - method) - 1;
- type = _EVHTTP_REQ_UNKNOWN;
+ type = EVHTTP_REQ_UNKNOWN_;
/* First line */
switch (method_len) {
break;
} /* switch */
- if (type == _EVHTTP_REQ_UNKNOWN) {
+ if (type == EVHTTP_REQ_UNKNOWN_) {
event_debug(("%s: bad method %s on request %p from %s",
__func__, method, req, req->remote_host));
/* No error yet; we'll give a better error later when
void
evhttp_uri_free(struct evhttp_uri *uri)
{
-#define _URI_FREE_STR(f) \
+#define URI_FREE_STR_(f) \
if (uri->f) { \
mm_free(uri->f); \
}
- _URI_FREE_STR(scheme);
- _URI_FREE_STR(userinfo);
- _URI_FREE_STR(host);
- _URI_FREE_STR(path);
- _URI_FREE_STR(query);
- _URI_FREE_STR(fragment);
+ URI_FREE_STR_(scheme);
+ URI_FREE_STR_(userinfo);
+ URI_FREE_STR_(host);
+ URI_FREE_STR_(path);
+ URI_FREE_STR_(query);
+ URI_FREE_STR_(fragment);
mm_free(uri);
-#undef _URI_FREE_STR
+#undef URI_FREE_STR_
}
char *
size_t joined_size = 0;
char *output = NULL;
-#define _URI_ADD(f) evbuffer_add(tmp, uri->f, strlen(uri->f))
+#define URI_ADD_(f) evbuffer_add(tmp, uri->f, strlen(uri->f))
if (!uri || !buf || !limit)
return NULL;
return NULL;
if (uri->scheme) {
- _URI_ADD(scheme);
+ URI_ADD_(scheme);
evbuffer_add(tmp, ":", 1);
}
if (uri->host) {
evbuffer_add(tmp, "//", 2);
if (uri->userinfo)
evbuffer_add_printf(tmp,"%s@", uri->userinfo);
- _URI_ADD(host);
+ URI_ADD_(host);
if (uri->port >= 0)
evbuffer_add_printf(tmp,":%d", uri->port);
}
if (uri->path)
- _URI_ADD(path);
+ URI_ADD_(path);
if (uri->query) {
evbuffer_add(tmp, "?", 1);
- _URI_ADD(query);
+ URI_ADD_(query);
}
if (uri->fragment) {
evbuffer_add(tmp, "#", 1);
- _URI_ADD(fragment);
+ URI_ADD_(fragment);
}
evbuffer_add(tmp, "\0", 1); /* NUL */
evbuffer_free(tmp);
return output;
-#undef _URI_ADD
+#undef URI_ADD_
}
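/* Usage sketch (editorial, not part of this patch): a parse-then-join
 * round trip; the URI string and buffer size are illustrative. */
	struct evhttp_uri *u = evhttp_uri_parse("http://host/path?q=1");
	if (u) {
		char out[256];
		if (evhttp_uri_join(u, out, sizeof(out)))
			/* 'out' now holds the re-serialized URI */;
		evhttp_uri_free(u);
	}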
const char *
return uri->fragment;
}
-#define _URI_SET_STR(f) do { \
+#define URI_SET_STR_(f) do { \
if (uri->f) \
mm_free(uri->f); \
if (f) { \
if (scheme && !scheme_ok(scheme, scheme+strlen(scheme)))
return -1;
- _URI_SET_STR(scheme);
+ URI_SET_STR_(scheme);
return 0;
}
int
{
if (userinfo && !userinfo_ok(userinfo, userinfo+strlen(userinfo)))
return -1;
- _URI_SET_STR(userinfo);
+ URI_SET_STR_(userinfo);
return 0;
}
int
}
}
- _URI_SET_STR(host);
+ URI_SET_STR_(host);
return 0;
}
int
if (path && end_of_cpath(path, PART_PATH, uri->flags) != path+strlen(path))
return -1;
- _URI_SET_STR(path);
+ URI_SET_STR_(path);
return 0;
}
int
{
if (query && end_of_cpath(query, PART_QUERY, uri->flags) != query+strlen(query))
return -1;
- _URI_SET_STR(query);
+ URI_SET_STR_(query);
return 0;
}
int
{
if (fragment && end_of_cpath(fragment, PART_FRAGMENT, uri->flags) != fragment+strlen(fragment))
return -1;
- _URI_SET_STR(fragment);
+ URI_SET_STR_(fragment);
return 0;
}
struct {
void *chain;
size_t pos_in_chain;
- } _internal;
+ } internal_;
};
/** Describes a single extent of memory inside an evbuffer. Used for
#ifdef EVENT__HAVE_SYS_UIO_H
#define evbuffer_iovec iovec
/* Internal use -- defined only if we are using the native struct iovec */
-#define _EVBUFFER_IOVEC_IS_NATIVE
+#define EVBUFFER_IOVEC_IS_NATIVE_
#else
struct evbuffer_iovec {
/** The start of the extent of memory. */
/** @name Log severities
*/
/**@{*/
-#define _EVENT_LOG_DEBUG 0
-#define _EVENT_LOG_MSG 1
-#define _EVENT_LOG_WARN 2
-#define _EVENT_LOG_ERR 3
+#define EVENT_LOG_DEBUG 0
+#define EVENT_LOG_MSG 1
+#define EVENT_LOG_WARN 2
+#define EVENT_LOG_ERR 3
/**@}*/
/**
Redirect Libevent's log messages.
@param cb a function taking two arguments: an integer severity between
- _EVENT_LOG_DEBUG and _EVENT_LOG_ERR, and a string. If cb is NULL,
+ EVENT_LOG_DEBUG and EVENT_LOG_ERR, and a string. If cb is NULL,
then the default log is used.
NOTE: The function you provide *must not* call any other libevent
something is wrong with your program, or with Libevent: any subsequent calls
to Libevent may result in undefined behavior.
- Libevent will (almost) always log an _EVENT_LOG_ERR message before calling
+ Libevent will (almost) always log an EVENT_LOG_ERR message before calling
this function; look at the last log message to see why Libevent has died.
*/
void event_set_fatal_callback(event_fatal_cb cb);
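/* Usage sketch (editorial, not part of this patch): a fatal callback
 * that records the error and aborts; it must not call back into
 * libevent.  <stdio.h> and <stdlib.h> are assumed. */
static void
my_fatal_cb(int err)
{
	fprintf(stderr, "libevent fatal error %d\n", err);
	abort();
}
	/* during initialization: */
	event_set_fatal_callback(my_fatal_cb);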
/* Fix so that people don't have to run with <sys/queue.h> */
#ifndef TAILQ_ENTRY
-#define _EVENT_DEFINED_TQENTRY
+#define EVENT_DEFINED_TQENTRY_
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
#endif /* !TAILQ_ENTRY */
#ifndef TAILQ_HEAD
-#define _EVENT_DEFINED_TQHEAD
+#define EVENT_DEFINED_TQHEAD_
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; \
/* Fix so that people don't have to run with <sys/queue.h> */
#ifndef LIST_ENTRY
-#define _EVENT_DEFINED_LISTENTRY
+#define EVENT_DEFINED_LISTENTRY_
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
/* Allows deletes in callback */
short *ev_pncalls;
} ev_signal;
- } _ev;
+ } ev_;
short ev_events;
short ev_res; /* result passed to event callback */
TAILQ_HEAD (event_list, event);
-#ifdef _EVENT_DEFINED_TQENTRY
+#ifdef EVENT_DEFINED_TQENTRY_
#undef TAILQ_ENTRY
#endif
-#ifdef _EVENT_DEFINED_TQHEAD
+#ifdef EVENT_DEFINED_TQHEAD_
#undef TAILQ_HEAD
#endif
-#ifdef _EVENT_DEFINED_LISTENTRY
+#ifdef EVENT_DEFINED_LISTENTRY_
#undef LIST_ENTRY
struct event_dlist;
-#undef _EVENT_DEFINED_LISTENTRY
+#undef EVENT_DEFINED_LISTENTRY_
#else
LIST_HEAD (event_dlist, event);
-#endif /* _EVENT_DEFINED_LISTENTRY */
+#endif /* EVENT_DEFINED_LISTENTRY_ */
#ifdef __cplusplus
}
/* Fix so that people don't have to run with <sys/queue.h> */
/* XXXX This code is duplicated with event_struct.h */
#ifndef TAILQ_ENTRY
-#define _EVENT_DEFINED_TQENTRY
+#define EVENT_DEFINED_TQENTRY_
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
#endif /* !TAILQ_ENTRY */
#ifndef TAILQ_HEAD
-#define _EVENT_DEFINED_TQHEAD
+#define EVENT_DEFINED_TQHEAD_
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; \
TAILQ_HEAD (evkeyvalq, evkeyval);
/* XXXX This code is duplicated with event_struct.h */
-#ifdef _EVENT_DEFINED_TQENTRY
+#ifdef EVENT_DEFINED_TQENTRY_
#undef TAILQ_ENTRY
#endif
-#ifdef _EVENT_DEFINED_TQHEAD
+#ifdef EVENT_DEFINED_TQHEAD_
#undef TAILQ_HEAD
#endif
@name Limits for integer types
These macros hold the largest or smallest values possible for the
  ev_[u]int*_t types.
@{
*/
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);
/** XXXX Document (nickm) */
-evutil_socket_t _evbuffer_overlapped_get_fd(struct evbuffer *buf);
+evutil_socket_t evbuffer_overlapped_get_fd_(struct evbuffer *buf);
-void _evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd);
+void evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd);
/** Start reading data onto the end of an overlapped evbuffer.
* if the handler for SIGCHLD is SIG_IGN, the system reaps
* zombie processes for us, and we don't get any notification.
* This appears to be the only signal with this quirk. */
- if (_evsig_set_handler(base, nsignal,
+ if (evsig_set_handler_(base, nsignal,
nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
return (-1);
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
- if (_evsig_restore_handler(base, nsignal) == -1)
+ if (evsig_restore_handler_(base, nsignal) == -1)
return (-1);
return (0);
#define EV_NORETURN
#endif
-#define _EVENT_ERR_ABORT ((int)0xdeaddead)
+#define EVENT_ERR_ABORT_ ((int)0xdeaddead)
#define USE_GLOBAL_FOR_DEBUG_LOGGING
#ifdef EVENT_DEBUG_LOGGING_ENABLED
#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
-extern ev_uint32_t _event_debug_logging_mask;
-#define _event_debug_get_logging_mask() (_event_debug_logging_mask)
+extern ev_uint32_t event_debug_logging_mask_;
+#define event_debug_get_logging_mask_() (event_debug_logging_mask_)
#else
-ev_uint32_t _event_debug_get_logging_mask(void);
+ev_uint32_t event_debug_get_logging_mask_(void);
#endif
#else
-#define _event_debug_get_logging_mask() (0)
+#define event_debug_get_logging_mask_() (0)
#endif
void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
-void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_debugx_(const char *fmt, ...) EV_CHECK_FMT(1,2);
#ifdef EVENT_DEBUG_LOGGING_ENABLED
#define event_debug(x) do { \
- if (_event_debug_get_logging_mask()) { \
- _event_debugx x; \
+ if (event_debug_get_logging_mask_()) { \
+ event_debugx_ x; \
} \
} while (0)
#else
#include "log-internal.h"
-static void _warn_helper(int severity, const char *errstr, const char *fmt,
+static void warn_helper_(int severity, const char *errstr, const char *fmt,
va_list ap);
static void event_log(int severity, const char *msg);
static void event_exit(int errcode) EV_NORETURN;
#endif
#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
-ev_uint32_t _event_debug_logging_mask = DEFAULT_MASK;
+ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
#else
-static ev_uint32_t _event_debug_logging_mask = DEFAULT_MASK;
+static ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
ev_uint32_t
-_event_debug_get_logging_mask(void)
+event_debug_get_logging_mask_(void)
{
- return _event_debug_logging_mask;
+ return event_debug_logging_mask_;
}
#endif
#endif /* EVENT_DEBUG_LOGGING_ENABLED */
if (fatal_fn) {
fatal_fn(errcode);
exit(errcode); /* should never be reached */
- } else if (errcode == _EVENT_ERR_ABORT)
+ } else if (errcode == EVENT_ERR_ABORT_)
abort();
else
exit(errcode);
va_list ap;
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_ERR, strerror(errno), fmt, ap);
+ warn_helper_(EVENT_LOG_ERR, strerror(errno), fmt, ap);
va_end(ap);
event_exit(eval);
}
va_list ap;
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_WARN, strerror(errno), fmt, ap);
+ warn_helper_(EVENT_LOG_WARN, strerror(errno), fmt, ap);
va_end(ap);
}
int err = evutil_socket_geterror(sock);
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
+ warn_helper_(EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
va_end(ap);
event_exit(eval);
}
int err = evutil_socket_geterror(sock);
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
+ warn_helper_(EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
va_end(ap);
}
va_list ap;
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_ERR, NULL, fmt, ap);
+ warn_helper_(EVENT_LOG_ERR, NULL, fmt, ap);
va_end(ap);
event_exit(eval);
}
va_list ap;
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_WARN, NULL, fmt, ap);
+ warn_helper_(EVENT_LOG_WARN, NULL, fmt, ap);
va_end(ap);
}
va_list ap;
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_MSG, NULL, fmt, ap);
+ warn_helper_(EVENT_LOG_MSG, NULL, fmt, ap);
va_end(ap);
}
void
-_event_debugx(const char *fmt, ...)
+event_debugx_(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
- _warn_helper(_EVENT_LOG_DEBUG, NULL, fmt, ap);
+ warn_helper_(EVENT_LOG_DEBUG, NULL, fmt, ap);
va_end(ap);
}
static void
-_warn_helper(int severity, const char *errstr, const char *fmt, va_list ap)
+warn_helper_(int severity, const char *errstr, const char *fmt, va_list ap)
{
char buf[1024];
size_t len;
else {
const char *severity_str;
switch (severity) {
- case _EVENT_LOG_DEBUG:
+ case EVENT_LOG_DEBUG:
severity_str = "debug";
break;
- case _EVENT_LOG_MSG:
+ case EVENT_LOG_MSG:
severity_str = "msg";
break;
- case _EVENT_LOG_WARN:
+ case EVENT_LOG_WARN:
severity_str = "warn";
break;
- case _EVENT_LOG_ERR:
+ case EVENT_LOG_ERR:
severity_str = "err";
break;
default:
/* Helper: set the signal handler for evsignal to handler in base, so that
* we can restore the original handler when we clear the current one. */
int
-_evsig_set_handler(struct event_base *base,
+evsig_set_handler_(struct event_base *base,
int evsignal, void (__cdecl *handler)(int))
{
#ifdef EVENT__HAVE_SIGACTION
EVSIGBASE_UNLOCK();
event_debug(("%s: %d: changing signal handler", __func__, (int)evsignal));
- if (_evsig_set_handler(base, (int)evsignal, evsig_handler) == -1) {
+ if (evsig_set_handler_(base, (int)evsignal, evsig_handler) == -1) {
goto err;
}
}
int
-_evsig_restore_handler(struct event_base *base, int evsignal)
+evsig_restore_handler_(struct event_base *base, int evsignal)
{
int ret = 0;
struct evsig_info *sig = &base->sig;
--base->sig.ev_n_signals_added;
EVSIGBASE_UNLOCK();
- return (_evsig_restore_handler(base, (int)evsignal));
+ return (evsig_restore_handler_(base, (int)evsignal));
}
static void __cdecl
for (i = 0; i < NSIG; ++i) {
if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
- _evsig_restore_handler(base, i);
+ evsig_restore_handler_(base, i);
}
EVSIGBASE_LOCK();
if (base == evsig_base) {
#ifndef EVENT__HAVE_STRLCPY
#include <string.h>
-size_t _event_strlcpy(char *dst, const char *src, size_t siz);
-#define strlcpy _event_strlcpy
+size_t event_strlcpy_(char *dst, const char *src, size_t siz);
+#define strlcpy event_strlcpy_
#endif
#ifdef __cplusplus
* Returns strlen(src); if retval >= siz, truncation occurred.
*/
size_t
-_event_strlcpy(dst, src, siz)
+event_strlcpy_(dst, src, siz)
char *dst;
const char *src;
size_t siz;
struct evutil_addrinfo;
struct evutil_addrinfo *ai_find_by_family(struct evutil_addrinfo *ai, int f);
struct evutil_addrinfo *ai_find_by_protocol(struct evutil_addrinfo *ai, int p);
-int _test_ai_eq(const struct evutil_addrinfo *ai, const char *sockaddr_port,
+int test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
int socktype, int protocol, int line);
#define test_ai_eq(ai, str, s, p) do { \
- if (_test_ai_eq((ai), (str), (s), (p), __LINE__)<0) \
+ if (test_ai_eq_((ai), (str), (s), (p), __LINE__)<0) \
goto end; \
} while (0)
/* Validates that an evbuffer is good. Returns false if it isn't, true if it
* is*/
static int
-_evbuffer_validate(struct evbuffer *buf)
+evbuffer_validate_(struct evbuffer *buf)
{
struct evbuffer_chain *chain;
size_t sum = 0;
}
#define evbuffer_validate(buf) \
- TT_STMT_BEGIN if (!_evbuffer_validate(buf)) TT_DIE(("Buffer format invalid")); TT_STMT_END
+ TT_STMT_BEGIN if (!evbuffer_validate_(buf)) TT_DIE(("Buffer format invalid")); TT_STMT_END
static void
test_evbuffer(void *ptr)
data = malloc(1024*512);
tt_assert(data);
for (i = 0; i < datalen; ++i)
- data[i] = _evutil_weakrand();
+ data[i] = evutil_weakrand_();
} else {
data = strdup("here is a relatively small string.");
tt_assert(data);
static void http_request_empty_done(struct evhttp_request *, void *);
static void
-_http_connection_test(struct basic_test_data *data, int persistent)
+http_connection_test_(struct basic_test_data *data, int persistent)
{
ev_uint16_t port = 0;
struct evhttp_connection *evcon = NULL;
static void
http_connection_test(void *arg)
{
- _http_connection_test(arg, 0);
+ http_connection_test_(arg, 0);
}
static void
http_persist_connection_test(void *arg)
{
- _http_connection_test(arg, 1);
+ http_connection_test_(arg, 1);
}
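/* The harness only accepts void (*)(void *) entry points, so each
 * variant is a thin wrapper baking a flag into one shared helper. The
 * same shape, generically (names hypothetical): */
static void run_case_(void *arg, int variant);
static void case_plain(void *arg)   { run_case_(arg, 0); }
static void case_variant(void *arg) { run_case_(arg, 1); }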
static struct regress_dns_server_table search_table[] = {
static void
-_http_close_detection(struct basic_test_data *data, int with_delay)
+http_close_detection_(struct basic_test_data *data, int with_delay)
{
ev_uint16_t port = 0;
struct evhttp_connection *evcon = NULL;
static void
http_close_detection_test(void *arg)
{
- _http_close_detection(arg, 0);
+ http_close_detection_(arg, 0);
}
static void
http_close_detection_delay_test(void *arg)
{
- _http_close_detection(arg, 1);
+ http_close_detection_(arg, 1);
}
static void
}
static void
-_http_incomplete_test(struct basic_test_data *data, int use_timeout)
+http_incomplete_test_(struct basic_test_data *data, int use_timeout)
{
struct bufferevent *bev;
evutil_socket_t fd;
static void
http_incomplete_test(void *arg)
{
- _http_incomplete_test(arg, 0);
+ http_incomplete_test_(arg, 0);
}
static void
http_incomplete_timeout_test(void *arg)
{
- _http_incomplete_test(arg, 1);
+ http_incomplete_test_(arg, 1);
}
/*
* Makes a request and reads the response in chunks.
*/
static void
-_http_stream_in_test(struct basic_test_data *data, char const *url,
+http_stream_in_test_(struct basic_test_data *data, char const *url,
size_t expected_len, char const *expected)
{
struct evhttp_connection *evcon;
static void
http_stream_in_test(void *arg)
{
- _http_stream_in_test(arg, "/chunked", 13 + 18 + 8,
+ http_stream_in_test_(arg, "/chunked", 13 + 18 + 8,
"This is funnybut not hilarious.bwv 1052");
- _http_stream_in_test(arg, "/test", strlen(BASIC_REQUEST_BODY),
+ http_stream_in_test_(arg, "/test", strlen(BASIC_REQUEST_BODY),
BASIC_REQUEST_BODY);
}
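/* Reading a response "in chunks" hinges on the chunked callback, which
 * fires each time part of the body arrives. A sketch using the public
 * evhttp API (event2/http.h, event2/buffer.h); the stdout sink is
 * illustrative only. */
#include <stdio.h>
#include <event2/http.h>
#include <event2/buffer.h>

static void
sketch_chunk_cb(struct evhttp_request *req, void *arg)
{
	struct evbuffer *in = evhttp_request_get_input_buffer(req);
	char piece[256];
	int n;
	while ((n = evbuffer_remove(in, piece, sizeof(piece))) > 0)
		fwrite(piece, 1, n, stdout);	/* consume this fragment */
}
/* Wired up before sending:
 * evhttp_request_set_chunked_cb(req, sketch_chunk_cb); */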
/* we just pause the rpc and continue it in the next callback */
-struct _rpc_hook_ctx {
+struct rpc_hook_ctx_ {
void *vbase;
void *ctx;
};
static void
rpc_hook_pause_cb(evutil_socket_t fd, short what, void *arg)
{
- struct _rpc_hook_ctx *ctx = arg;
+ struct rpc_hook_ctx_ *ctx = arg;
++hook_pause_cb_called;
evrpc_resume_request(ctx->vbase, ctx->ctx, EVRPC_CONTINUE);
free(arg);
rpc_hook_pause(void *ctx, struct evhttp_request *req, struct evbuffer *evbuf,
void *arg)
{
- struct _rpc_hook_ctx *tmp = malloc(sizeof(*tmp));
+ struct rpc_hook_ctx_ *tmp = malloc(sizeof(*tmp));
struct timeval tv;
assert(tmp != NULL);
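/* The full pause/resume round trip, condensed: the hook stashes its
 * context, schedules a one-shot timer, and answers EVRPC_PAUSE; the
 * timer callback above then calls evrpc_resume_request(). A sketch --
 * the zeroed timeout and the sketch_base event_base are assumptions: */
#include <stdlib.h>
#include <string.h>
#include <event2/event.h>
#include <event2/rpc.h>

extern struct event_base *sketch_base;	/* whichever base drives the test */

static int
sketch_pause_hook(void *ctx, struct evhttp_request *req,
    struct evbuffer *evbuf, void *arg)
{
	struct rpc_hook_ctx_ *tmp = malloc(sizeof(*tmp));
	struct timeval tv;
	tmp->vbase = arg;	/* the rpc base the resume call needs */
	tmp->ctx = ctx;
	memset(&tv, 0, sizeof(tv));
	event_base_once(sketch_base, -1, EV_TIMEOUT,
	    rpc_hook_pause_cb, tmp, &tv);
	return EVRPC_PAUSE;	/* hold the request until resumed */
}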
* module didn't enforce the requirement that a fatal callback
 * actually exit. Now, it exits no matter what, so if we want to
* reinstate these tests, we'll need to fork for each one. */
- check_error_logging(errx_fn, 2, _EVENT_LOG_ERR,
+ check_error_logging(errx_fn, 2, EVENT_LOG_ERR,
"Fatal error; too many kumquats (5)");
RESET();
#endif
event_warnx("Far too many %s (%d)", "wombats", 99);
- LOGEQ(_EVENT_LOG_WARN, "Far too many wombats (99)");
+ LOGEQ(EVENT_LOG_WARN, "Far too many wombats (99)");
RESET();
event_msgx("Connecting lime to coconut");
- LOGEQ(_EVENT_LOG_MSG, "Connecting lime to coconut");
+ LOGEQ(EVENT_LOG_MSG, "Connecting lime to coconut");
RESET();
event_debug(("A millisecond passed! We should log that!"));
#ifdef USE_DEBUG
- LOGEQ(_EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
+ LOGEQ(EVENT_LOG_DEBUG, "A millisecond passed! We should log that!");
#else
tt_int_op(logsev,==,0);
tt_ptr_op(logmsg,==,NULL);
event_warn("Couldn't open %s", "/bad/file");
evutil_snprintf(buf, sizeof(buf),
"Couldn't open /bad/file: %s",strerror(ENOENT));
- LOGEQ(_EVENT_LOG_WARN,buf);
+ LOGEQ(EVENT_LOG_WARN,buf);
RESET();
#ifdef CAN_CHECK_ERR
evutil_snprintf(buf, sizeof(buf),
"Couldn't open /very/bad/file: %s",strerror(ENOENT));
- check_error_logging(err_fn, 5, _EVENT_LOG_ERR, buf);
+ check_error_logging(err_fn, 5, EVENT_LOG_ERR, buf);
RESET();
#endif
errno = EAGAIN;
#endif
event_sock_warn(fd, "Unhappy socket");
- LOGEQ(_EVENT_LOG_WARN, buf);
+ LOGEQ(EVENT_LOG_WARN, buf);
RESET();
#ifdef CAN_CHECK_ERR
- check_error_logging(sock_err_fn, 20, _EVENT_LOG_ERR, buf);
+ check_error_logging(sock_err_fn, 20, EVENT_LOG_ERR, buf);
RESET();
#endif
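/* LOGEQ can only compare captured output, so these tests must first
 * divert logging into the logsev/logmsg globals referenced above. A
 * plausible capture callback -- the exact harness wiring is an
 * assumption, not shown in this patch: */
#include <stdlib.h>
#include <string.h>

static int logsev = 0;
static char *logmsg = NULL;

static void
sketch_capture_cb(int severity, const char *msg)
{
	logsev = severity;	/* remembered for LOGEQ */
	free(logmsg);
	logmsg = strdup(msg);
}
/* LOGEQ(sev, msg) then asserts logsev == (sev) and !strcmp(logmsg, (msg)). */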
int
-_test_ai_eq(const struct evutil_addrinfo *ai, const char *sockaddr_port,
+test_ai_eq_(const struct evutil_addrinfo *ai, const char *sockaddr_port,
int socktype, int protocol, int line)
{
struct sockaddr_storage ss;
for (k=0;k<32;++k) {
/* Try a few different start and end points; try to catch
* the various misaligned cases of arc4random_buf */
- int startpoint = _evutil_weakrand() % 4;
- int endpoint = 32 - (_evutil_weakrand() % 4);
+ int startpoint = evutil_weakrand_() % 4;
+ int endpoint = 32 - (evutil_weakrand_() % 4);
memset(buf2, 0, sizeof(buf2));
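/* What plausibly follows the memset: fill only the chosen window and
 * confirm the zeroed bytes outside it were not touched. A sketch under
 * that assumption: */
int j;
arc4random_buf(buf2 + startpoint, endpoint - startpoint);
for (j = 0; j < startpoint; ++j)
	tt_int_op(buf2[j], ==, 0);	/* nothing written before the window */
for (j = endpoint; j < 32; ++j)
	tt_int_op(buf2[j], ==, 0);	/* nothing written after it */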
#endif
/* A good no-op to use in macro definitions. */
-#define _EVUTIL_NIL_STMT ((void)0)
+#define EVUTIL_NIL_STMT_ ((void)0)
/* A no-op that tricks the compiler into thinking a condition is used while
* definitely not making any code for it. Used to compile out asserts while
* avoiding "unused variable" warnings. The "!" forces the compiler to
* do the sizeof() on an int, in case "condition" is a bitfield value.
*/
-#define _EVUTIL_NIL_CONDITION(condition) do { \
+#define EVUTIL_NIL_CONDITION_(condition) do { \
(void)sizeof(!(condition)); \
} while(0)
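/* Two things make this macro work. First, ISO C forbids applying sizeof
 * to a bit-field lvalue, while !(condition) is an ordinary int -- hence
 * the "!". Second, because the condition still appears inside sizeof,
 * variables used only in asserts stay "used" when NDEBUG maps
 * EVUTIL_ASSERT (below) onto it. A compile-time illustration; the names
 * here are illustrative: */
struct sketch_flags { unsigned busy:1; };

static void
sketch_nil(struct sketch_flags f, int r)
{
	/* (void)sizeof(f.busy);  -- rejected: sizeof may not take a bit-field */
	(void)sizeof(!(f.busy));	/* OK: "!" yields int; never evaluated */

	EVUTIL_ASSERT(r == 0);	/* with NDEBUG this becomes
				 * (void)sizeof(!(r == 0)): no code emitted,
				 * yet r still counts as used, so no
				 * unused-variable warning */
}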
const char *evutil_getenv(const char *name);
-long _evutil_weakrand(void);
+long evutil_weakrand_(void);
/* Evaluates to the same boolean value as 'p', and hints to the compiler that
* we expect this value to be false. */
/* Replacement for assert() that calls event_errx on failure. */
#ifdef NDEBUG
-#define EVUTIL_ASSERT(cond) _EVUTIL_NIL_CONDITION(cond)
+#define EVUTIL_ASSERT(cond) EVUTIL_NIL_CONDITION_(cond)
#define EVUTIL_FAILURE_CHECK(cond) 0
#else
#define EVUTIL_ASSERT(cond) \
do { \
if (EVUTIL_UNLIKELY(!(cond))) { \
- event_errx(_EVENT_ERR_ABORT, \
+ event_errx(EVENT_ERR_ABORT_, \
"%s:%d: Assertion %s failed in %s", \
__FILE__,__LINE__,#cond,__func__); \
/* In case a user-supplied handler tries to */ \