#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
-
#define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
return result;
}
-unsigned char *
-evbuffer_reserve_space(struct evbuffer *buf, size_t size)
+int
+evbuffer_reserve_space(struct evbuffer *buf, ssize_t size,
+ struct evbuffer_iovec *vec, int n_vecs)
{
struct evbuffer_chain *chain;
- unsigned char *result = NULL;
-
- EVBUFFER_LOCK(buf, EVTHREAD_WRITE);
+ int n = -1;
+ EVBUFFER_LOCK(buf, EVTHREAD_WRITE);
if (buf->freeze_end)
goto done;
+ if (n_vecs < 1)
+ goto done;
+ if (n_vecs == 1) {
+ if (evbuffer_expand(buf, size) == -1)
+ goto done;
+ chain = buf->last;
- if (evbuffer_expand(buf, size) == -1)
- goto done;
-
- chain = buf->last;
-
- result = (chain->buffer + chain->misalign + chain->off);
+ vec[0].iov_base = CHAIN_SPACE_PTR(chain);
+ vec[0].iov_len = CHAIN_SPACE_LEN(chain);
+ n = 1;
+ } else {
+ if (_evbuffer_expand_fast(buf, size)<0)
+ goto done;
+ n = _evbuffer_read_setup_vecs(buf, size, vec, &chain, 0);
+ }
done:
- EVBUFFER_UNLOCK(buf, EVTHREAD_WRITE);
+ EVBUFFER_UNLOCK(buf, EVTHREAD_WRITE);
+ return n;
- return result;
}
int
-evbuffer_commit_space(struct evbuffer *buf, size_t size)
+evbuffer_commit_space(struct evbuffer *buf,
+ struct evbuffer_iovec *vec, int n_vecs)
{
- struct evbuffer_chain *chain;
- int result = -1;
-
- EVBUFFER_LOCK(buf, EVTHREAD_WRITE);
- if (buf->freeze_end) {
- goto done;
- }
+ struct evbuffer_chain *prev, *last;
+ int result = -1;
+ size_t added;
- chain = buf->last;
- if (chain == NULL ||
- chain->buffer_len - chain->off - chain->misalign < size)
+ EVBUFFER_LOCK(buf, EVTHREAD_WRITE);
+ prev = buf->previous_to_last;
+ last = buf->last;
+ if (buf->freeze_end)
goto done;
+ if (n_vecs < 1 || n_vecs > 2)
+ goto done;
+ if (n_vecs == 2) {
+ if (!prev || !last ||
+ vec[0].iov_base != CHAIN_SPACE_PTR(prev) ||
+ vec[1].iov_base != CHAIN_SPACE_PTR(last) ||
+ vec[0].iov_len > CHAIN_SPACE_LEN(prev) ||
+ vec[1].iov_len > CHAIN_SPACE_LEN(last))
+ goto done;
+
+ prev->off += vec[0].iov_len;
+ last->off += vec[1].iov_len;
+ added = vec[0].iov_len + vec[1].iov_len;
+ } else {
+ /* n_vecs == 1 */
+ struct evbuffer_chain *chain;
+ if (prev && vec[0].iov_base == CHAIN_SPACE_PTR(prev))
+ chain = prev;
+ else if (last && vec[0].iov_base == CHAIN_SPACE_PTR(last))
+ chain = last;
+ else
+ goto done;
+ if (vec[0].iov_len > CHAIN_SPACE_LEN(chain))
+ goto done;
- chain->off += size;
- buf->total_len += size;
- buf->n_add_for_cb += size;
+ chain->off += vec[0].iov_len;
+ added = vec[0].iov_len;
+ }
+ buf->total_len += added;
+ buf->n_add_for_cb += added;
result = 0;
evbuffer_invoke_callbacks(buf);
+
done:
- EVBUFFER_UNLOCK(buf, EVTHREAD_WRITE);
+ EVBUFFER_UNLOCK(buf, EVTHREAD_WRITE);
return result;
}
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#endif
+
+#define IOV_TYPE_FROM_EVBUFFER_IOV(i,ei) do { \
+ (i)->IOV_PTR_FIELD = (ei)->iov_base; \
+ (i)->IOV_LEN_FIELD = (ei)->iov_len; \
+ } while(0)
+
#endif
#define EVBUFFER_MAX_READ 4096
- @param vecs An array of two iovecs or WSABUFs.
+ @param vecs An array of two evbuffer_iovec structs to fill in.
@param chainp A pointer to a variable to hold the first chain we're
reading into.
+ @param exact If true, clip the returned extents so that their total
+    length does not exceed 'howmuch' bytes; otherwise expose all the
+    free space in the chains we use.
@return The number of buffers we're using.
*/
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
- IOV_TYPE *vecs, struct evbuffer_chain **chainp)
+ struct evbuffer_iovec *vecs, struct evbuffer_chain **chainp, int exact)
{
struct evbuffer_chain *chain;
int nvecs;
use the space in the next-to-last chain.
*/
struct evbuffer_chain *prev = buf->previous_to_last;
- vecs[0].IOV_PTR_FIELD = CHAIN_SPACE_PTR(prev);
- vecs[0].IOV_LEN_FIELD = CHAIN_SPACE_LEN(prev);
- vecs[1].IOV_PTR_FIELD = CHAIN_SPACE_PTR(chain);
- vecs[1].IOV_LEN_FIELD = CHAIN_SPACE_LEN(chain);
- if (vecs[0].IOV_LEN_FIELD >= (size_t)howmuch) {
+ vecs[0].iov_base = CHAIN_SPACE_PTR(prev);
+ vecs[0].iov_len = CHAIN_SPACE_LEN(prev);
+ vecs[1].iov_base = CHAIN_SPACE_PTR(chain);
+ vecs[1].iov_len = CHAIN_SPACE_LEN(chain);
+ if (vecs[0].iov_len >= (size_t)howmuch) {
/* The next-to-last chain has enough
* space on its own. */
chain = prev;
/* We'll need both chains. */
chain = prev;
nvecs = 2;
- if (vecs[0].IOV_LEN_FIELD + vecs[1].IOV_LEN_FIELD > (size_t)howmuch) {
- vecs[1].IOV_LEN_FIELD = howmuch - vecs[0].IOV_LEN_FIELD;
+ if (exact &&
+ (vecs[0].iov_len + vecs[1].iov_len > (size_t)howmuch)) {
+ vecs[1].iov_len = howmuch - vecs[0].iov_len;
}
}
} else {
/* There's data in the last chain, so we're
* not allowed to use the next-to-last. */
nvecs = 1;
- vecs[0].IOV_PTR_FIELD = CHAIN_SPACE_PTR(chain);
- vecs[0].IOV_LEN_FIELD = CHAIN_SPACE_LEN(chain);
- if (vecs[0].IOV_LEN_FIELD > (size_t)howmuch)
- vecs[0].IOV_LEN_FIELD = howmuch;
+ vecs[0].iov_base = CHAIN_SPACE_PTR(chain);
+ vecs[0].iov_len = CHAIN_SPACE_LEN(chain);
+ if (exact && (vecs[0].iov_len > (size_t)howmuch))
+ vecs[0].iov_len = howmuch;
}
*chainp = chain;
goto done;
} else {
IOV_TYPE vecs[2];
- nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
- &chain);
+ struct evbuffer_iovec ev_vecs[2];
+ nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs,
+ &chain, 1);
+
+ if (nvecs == 2) {
+ IOV_TYPE_FROM_EVBUFFER_IOV(&vecs[1], &ev_vecs[1]);
+ IOV_TYPE_FROM_EVBUFFER_IOV(&vecs[0], &ev_vecs[0]);
+ } else if (nvecs == 1) {
+ IOV_TYPE_FROM_EVBUFFER_IOV(&vecs[0], &ev_vecs[0]);
+ }
#ifdef WIN32
{
return pos;
}
+int
+evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
+ struct evbuffer_ptr *start_at,
+ struct evbuffer_iovec *vec, int n_vec)
+{
+ struct evbuffer_chain *chain;
+ int idx = 0;
+ size_t len_so_far = 0;
+
+ EVBUFFER_LOCK(buffer, EVTHREAD_READ);
+
+ if (start_at) {
+ chain = start_at->_internal.chain;
+ len_so_far = chain->off
+ - start_at->_internal.pos_in_chain;
+ idx = 1;
+ if (n_vec > 0) {
+ vec[0].iov_base = chain->buffer + chain->misalign
+ + start_at->_internal.pos_in_chain;
+ vec[0].iov_len = len_so_far;
+ }
+ chain = chain->next;
+ } else {
+ chain = buffer->first;
+ }
+
+ while (chain) {
+ if (len >= 0 && len_so_far >= len)
+ break;
+ if (idx<n_vec) {
+ vec[idx].iov_base = chain->buffer + chain->misalign;
+ vec[idx].iov_len = chain->off;
+ } else if (len<0)
+ break;
+ ++idx;
+ len_so_far += chain->off;
+ chain = chain->next;
+ }
+
+ EVBUFFER_UNLOCK(buffer, EVTHREAD_READ);
+
+ return idx;
+}
+
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
struct evbuffer *evbuf = &buf->buffer;
struct evbuffer_chain *chain = buf_o->first_pinned;
+ struct evbuffer_iovec iov[2];
+ int n_vec;
EVBUFFER_LOCK(evbuf, EVTHREAD_WRITE);
buf->read_in_progress = 0;
evbuffer_unfreeze(evbuf, 0);
- if (chain == evbuf->previous_to_last) {
- ssize_t n = chain->buffer_len - (chain->misalign + chain->off);
- if (n>nBytes)
- n=nBytes;
- chain->off += n;
- nBytes -= n;
- evbuf->n_add_for_cb += n;
-
- evbuffer_commit_space(evbuf, nBytes);
- } else if (chain == evbuf->last) {
- evbuffer_commit_space(evbuf, nBytes);
+ iov[0].iov_base = buf_o->buffers[0].buf;
+ if (nBytes <= buf_o->buffers[0].len) {
+ iov[0].iov_len = nBytes;
+ n_vec = 1;
} else {
- assert(0);
+ iov[0].iov_len = buf_o->buffers[0].len;
+ iov[1].iov_base = buf_o->buffers[1].buf;
+ iov[1].iov_len = nBytes - iov[0].iov_len;
+ n_vec = 2;
}
+ if (evbuffer_commit_space(evbuf, iov, n_vec) < 0)
+ assert(0); /* XXXX fail nicer. */
+
pin_release(eo, EVBUFFER_MEM_PINNED_R);
_evbuffer_decref_and_unlock(evbuf);
return r;
}
+#define IOV_TYPE_FROM_EVBUFFER_IOV(i,ei) do { \
+ (i)->buf = (ei)->iov_base; \
+ (i)->len = (ei)->iov_len; \
+ } while(0)
+
int
evbuffer_launch_read(struct evbuffer *buf, size_t at_most)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
- int r = -1;
+ int r = -1, i;
int nvecs;
int npin=0;
struct evbuffer_chain *chain=NULL;
DWORD bytesRead;
DWORD flags = 0;
+ struct evbuffer_iovec vecs[MAX_VECS];
if (!buf_o)
return -1;
buf_o->read_info.event_overlapped.cb = read_completed;
nvecs = _evbuffer_read_setup_vecs(buf, at_most,
- buf_o->read_info.buffers, &chain);
+ vecs, &chain, 1);
+ for (i=0;i<nvecs;++i) {
+ IOV_TYPE_FROM_EVBUFFER_IOV(
+ &buf_o->read_info.buffers[i],
+ &vecs[i]);
+ }
+
buf_o->read_info.n_buffers = nvecs;
buf_o->read_info.first_pinned = chain;
npin=0;
* is contiguous. Instead, it may be split across two chunks. */
int _evbuffer_expand_fast(struct evbuffer *, size_t);
-#ifdef _EVENT_HAVE_SYS_UIO_H
/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
* hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
* Sets up the one or two iovecs in 'vecs' to point to the free memory and its
* Returns the number of vecs used.
*/
int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
- struct iovec *vecs, struct evbuffer_chain **chainp);
-#elif defined(WIN32)
-int _evbuffer_read_setup_vecs(struct evbuffer *buf, ssize_t howmuch,
- WSABUF *vecs, struct evbuffer_chain **chainp);
-#endif
+ struct evbuffer_iovec *vecs, struct evbuffer_chain **chainp, int exact);
#ifdef __cplusplus
}
} _internal;
};
+/** Describes a single extent of memory inside an evbuffer. Used for
+ direct-access functions.
+
+ @see evbuffer_reserve_space, evbuffer_commit_space, evbuffer_peek
+ */
+struct evbuffer_iovec {
+ /** The start of the extent of memory. */
+ void *iov_base;
+ /** The length of the extent of memory. */
+ size_t iov_len;
+};
+
/**
Allocate storage for a new evbuffer.
available for reading until it has been committed with
evbuffer_commit_space().
+ The space is made available as one or more extents, each described
+ by a starting pointer and a length. You can force the memory to be
+ made available as a single extent, but allowing more than one makes
+ the function more efficient.
+
Multiple subsequent calls to this function will make the same space
available until evbuffer_commit_space() has been called.
+ It is an error to do anything that moves around the buffer's internal
+ memory structures before committing the space.
+
+ NOTE: The code currently does not ever use more than two extents.
+ This may change in future versions.
+
@param buf the event buffer in which to reserve space.
- @param size how much space to make available.
- @return the pointer to the available space or NULL on error.
+ @param size how much space to make available, at minimum. The
+ total length of the extents may be greater than the requested
+ length.
+ @param vec an array of one or more evbuffer_iovec structures to
+ hold pointers to the reserved extents of memory.
+ @param n_vecs The length of the vec array. Must be at least 1.
+ @return the number of provided extents, or -1 on error.
@see evbuffer_commit_space
*/
-/* FIXME: This interface is prone to leaving gaps in the buffer and
- * reallocating stuff needlessly. Nothing uses it. It was new in Libevent 2.0.
- * It should get re-thought. */
-unsigned char *evbuffer_reserve_space(struct evbuffer *buf, size_t size);
+int
+evbuffer_reserve_space(struct evbuffer *buf, ssize_t size,
+ struct evbuffer_iovec *vec, int n_vecs);
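+
+/* A minimal usage sketch (illustrative only, not part of this patch):
+   reserve at least 4096 bytes, fill every extent returned, and commit
+   them all. The evbuffer 'buf' and the zero fill are assumptions made
+   for the example.
+
+       struct evbuffer_iovec v[2];
+       int i, n;
+
+       n = evbuffer_reserve_space(buf, 4096, v, 2);
+       if (n < 0)
+           return -1;
+       for (i = 0; i < n; ++i)
+           memset(v[i].iov_base, 0, v[i].iov_len);
+       if (evbuffer_commit_space(buf, v, n) < 0)
+           return -1;
+*/
+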
/**
Commits previously reserved space.
Commits some of the space previously reserved with
evbuffer_reserve_space(). It then becomes available for reading.
+ This function may return an error if the pointers in the extents do
+ not match those returned from evbuffer_reserve_space(), or if data
+ has been added to the buffer since the space was reserved.
+
+ If you want to commit less data than you reserved space for, set
+ the iov_len field of each extent to a smaller value before
+ committing. Note that you may have received more space than you
+ requested if it was available.
+
@param buf the event buffer in which to reserve space.
- @param size how much space to commit.
+ @param vec one or two extents returned by evbuffer_reserve_space.
+ @param n_vecs the number of extents.
@return 0 on success, -1 on error
@see evbuffer_reserve_space
*/
-
-int evbuffer_commit_space(struct evbuffer *buf, size_t size);
-
+int evbuffer_commit_space(struct evbuffer *buf,
+ struct evbuffer_iovec *vec, int n_vecs);
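+
+/* A sketch (illustrative only, not part of this patch) of committing
+   less data than was reserved: write 'len' bytes into a single extent,
+   shrink iov_len to match, then commit. 'buf', 'data' and 'len' are
+   hypothetical names used only for the example.
+
+       struct evbuffer_iovec v[1];
+
+       if (evbuffer_reserve_space(buf, len, v, 1) != 1)
+           return -1;
+       memcpy(v[0].iov_base, data, len);
+       v[0].iov_len = len;
+       if (evbuffer_commit_space(buf, v, 1) < 0)
+           return -1;
+*/
+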
/**
Append data to the end of an evbuffer.
size_t n_deleted;
};
+/** Function to peek at data inside an evbuffer without removing it or
+ copying it out.
+
+ Pointers to the data are returned by filling the 'vec_out' array
+ with pointers to one or more extents of data inside the buffer.
+
+ The total data in the extents that you get back may be more than
+ you requested (if there is more data in the last extent than you asked
+ for), or less (if you do not provide enough evbuffer_iovecs, or if
+ the buffer does not have as much data as you asked to see).
+
+ @param buffer the evbuffer to peek into.
+ @param len the number of bytes to try to peek. If negative, we
+ will try to fill as much of vec_out as we can.
+ @param start_at an evbuffer_ptr indicating the point at which we
+ should start looking for data. NULL means, "At the start of the
+ buffer."
+ @param vec_out an array of evbuffer_iovec
+ @param n_vec the length of vec_out. If 0, we only count how many
+ extents would be necessary to point to the requested amount of
+ data.
+ @return The number of extents needed. This may be less than n_vec
+ if we didn't need all the evbuffer_iovecs we were given, or more
+ than n_vec if we would need more to return all the data that was
+ requested.
+ */
+int evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
+ struct evbuffer_ptr *start_at,
+ struct evbuffer_iovec *vec_out, int n_vec);
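+
+/* A sketch (illustrative only, not part of this patch) of walking the
+   readable data without copying it. With a negative 'len', the return
+   value never exceeds n_vec, so the loop below stays in bounds;
+   'process' is a hypothetical consumer function.
+
+       struct evbuffer_iovec v[16];
+       int i, n;
+
+       n = evbuffer_peek(buf, -1, NULL, v, 16);
+       for (i = 0; i < n; ++i)
+           process(v[i].iov_base, v[i].iov_len);
+*/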
+
/** Type definition for a callback that is invoked whenever data is added or
removed from an evbuffer.
evbuffer_validate(evb);
- /* testing reserve and commit */
+ /* testing one-vector reserve and commit */
{
- u_char *buf;
- int i, j;
+ struct evbuffer_iovec v[1];
+ char *buf;
+ int i, j, r;
for (i = 0; i < 3; ++i) {
- buf = evbuffer_reserve_space(evb, 10000);
- tt_assert(buf != NULL);
+ r = evbuffer_reserve_space(evb, 10000, v, 1);
+ tt_int_op(r, ==, 1);
+ tt_assert(v[0].iov_len >= 10000);
+ tt_assert(v[0].iov_base != NULL);
+
evbuffer_validate(evb);
+ buf = v[0].iov_base;
for (j = 0; j < 10000; ++j) {
buf[j] = j;
}
evbuffer_validate(evb);
- tt_assert(evbuffer_commit_space(evb, 10000) == 0);
+ tt_int_op(evbuffer_commit_space(evb, v, 1), ==, 0);
evbuffer_validate(evb);
tt_assert(evbuffer_get_length(evb) >= 10000);
evbuffer_free(evb_two);
}
+static void
+test_evbuffer_reserve2(void *ptr)
+{
+ /* Test the two-vector cases of reserve/commit. */
+ struct evbuffer *buf = evbuffer_new();
+ int n, i;
+ struct evbuffer_iovec v[2];
+ size_t remaining;
+ char *cp, *cp2;
+
+ /* The first reservation will necessarily be one extent. Use 512 bytes of it. */
+ n = evbuffer_reserve_space(buf, 1024, v, 2);
+ tt_int_op(n, ==, 1);
+ tt_int_op(evbuffer_get_length(buf), ==, 0);
+ tt_assert(v[0].iov_base != NULL);
+ tt_int_op(v[0].iov_len, >=, 1024);
+ memset(v[0].iov_base, 'X', 512);
+ cp = v[0].iov_base;
+ remaining = v[0].iov_len - 512;
+ v[0].iov_len = 512;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
+ tt_int_op(evbuffer_get_length(buf), ==, 512);
+
+ /* Ask for another same-chunk request, in an existing chunk. Use 8
+ * bytes of it. */
+ n = evbuffer_reserve_space(buf, 32, v, 2);
+ tt_int_op(n, ==, 1);
+ tt_assert(cp + 512 == v[0].iov_base);
+ tt_int_op(remaining, ==, v[0].iov_len);
+ memset(v[0].iov_base, 'Y', 8);
+ v[0].iov_len = 8;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
+ tt_int_op(evbuffer_get_length(buf), ==, 520);
+ remaining -= 8;
+
+ /* Now ask for a request that will be split. Use only one byte of it,
+ though. */
+ n = evbuffer_reserve_space(buf, remaining+64, v, 2);
+ tt_int_op(n, ==, 2);
+ tt_assert(cp + 520 == v[0].iov_base);
+ tt_int_op(remaining, ==, v[0].iov_len);
+ tt_assert(v[1].iov_base);
+ tt_assert(v[1].iov_len >= 64);
+ cp2 = v[1].iov_base;
+ memset(v[0].iov_base, 'Z', 1);
+ v[0].iov_len = 1;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
+ tt_int_op(evbuffer_get_length(buf), ==, 521);
+ remaining -= 1;
+
+ /* Now ask for a request that will be split. Use some of the first
+ * part and some of the second. */
+ n = evbuffer_reserve_space(buf, remaining+64, v, 2);
+ tt_int_op(n, ==, 2);
+ tt_assert(cp + 521 == v[0].iov_base);
+ tt_int_op(remaining, ==, v[0].iov_len);
+ tt_assert(v[1].iov_base == cp2);
+ tt_assert(v[1].iov_len >= 64);
+ memset(v[0].iov_base, 'W', 400);
+ v[0].iov_len = 400;
+ memset(v[1].iov_base, 'x', 60);
+ v[1].iov_len = 60;
+ tt_int_op(0, ==, evbuffer_commit_space(buf, v, 2));
+ tt_int_op(evbuffer_get_length(buf), ==, 981);
+
+ /* Now peek to make sure stuff got made how we like. */
+ memset(v,0,sizeof(v));
+ n = evbuffer_peek(buf, -1, NULL, v, 2);
+ tt_int_op(n, ==, 2);
+ tt_int_op(v[0].iov_len, ==, 921);
+ tt_int_op(v[1].iov_len, ==, 60);
+
+ cp = v[0].iov_base;
+ for (i=0; i<512; ++i)
+ tt_int_op(cp[i], ==, 'X');
+ for (i=512; i<520; ++i)
+ tt_int_op(cp[i], ==, 'Y');
+ for (i=520; i<521; ++i)
+ tt_int_op(cp[i], ==, 'Z');
+ for (i=521; i<921; ++i)
+ tt_int_op(cp[i], ==, 'W');
+
+ cp = v[1].iov_base;
+ for (i=0; i<60; ++i)
+ tt_int_op(cp[i], ==, 'x');
+
+end:
+ evbuffer_free(buf);
+}
+
static int reference_cb_called;
static void
reference_cb(const void *data, size_t len, void *extra)
{
struct evbuffer *src = evbuffer_new();
struct evbuffer *dst = evbuffer_new();
- unsigned char *tmp;
+ struct evbuffer_iovec v[1];
const char *data = "this is what we add as read-only memory.";
reference_cb_called = 0;
tt_assert(evbuffer_add_reference(src, data, strlen(data),
reference_cb, (void *)0xdeadaffe) != -1);
- tmp = evbuffer_reserve_space(dst, strlen(data));
- tt_assert(evbuffer_remove(src, tmp, 10) != -1);
+ evbuffer_reserve_space(dst, strlen(data), v, 1);
+ tt_assert(evbuffer_remove(src, v[0].iov_base, 10) != -1);
evbuffer_validate(src);
evbuffer_validate(dst);
evbuffer_validate(src);
evbuffer_drain(src, 5);
- tt_assert(evbuffer_remove(src, tmp + 10, strlen(data) - 10) != -1);
+ tt_assert(evbuffer_remove(src, ((char*)(v[0].iov_base)) + 10,
+ strlen(data) - 10) != -1);
+
+ v[0].iov_len = strlen(data);
- evbuffer_commit_space(dst, strlen(data));
+ evbuffer_commit_space(dst, v, 1);
evbuffer_validate(src);
evbuffer_validate(dst);
{
struct evbuffer *buf = evbuffer_new();
struct evbuffer_ptr pos;
+ struct evbuffer_iovec v[1];
/* create some chains */
- evbuffer_reserve_space(buf, 5000);
- evbuffer_commit_space(buf, 5000);
- evbuffer_reserve_space(buf, 4000);
- evbuffer_commit_space(buf, 4000);
- evbuffer_reserve_space(buf, 3000);
- evbuffer_commit_space(buf, 3000);
+ evbuffer_reserve_space(buf, 5000, v, 1);
+ v[0].iov_len = 5000;
+ memset(v[0].iov_base, 1, v[0].iov_len);
+ evbuffer_commit_space(buf, v, 1);
+
+ evbuffer_reserve_space(buf, 4000, v, 1);
+ v[0].iov_len = 4000;
+ memset(v[0].iov_base, 2, v[0].iov_len);
+ evbuffer_commit_space(buf, v, 1);
+
+ evbuffer_reserve_space(buf, 3000, v, 1);
+ v[0].iov_len = 3000;
+ memset(v[0].iov_base, 3, v[0].iov_len);
+ evbuffer_commit_space(buf, v, 1);
+
+ tt_int_op(evbuffer_get_length(buf), ==, 12000);
tt_assert(evbuffer_ptr_set(buf, &pos, 13000, EVBUFFER_PTR_SET) == -1);
tt_assert(pos.pos == -1);
}
+static void
+test_evbuffer_peek(void *info)
+{
+ struct evbuffer *buf = NULL, *tmp_buf = NULL;
+ int i;
+ struct evbuffer_iovec v[20];
+ struct evbuffer_ptr ptr;
+
+#define tt_iov_eq(v, s) \
+ tt_int_op((v)->iov_len, ==, strlen(s)); \
+ tt_assert(!memcmp((v)->iov_base, (s), strlen(s)))
+
+ /* Let's make a very fragmented buffer. */
+ buf = evbuffer_new();
+ tmp_buf = evbuffer_new();
+ for (i = 0; i < 16; ++i) {
+ evbuffer_add_printf(tmp_buf, "Contents of chunk [%d]\n", i);
+ evbuffer_add_buffer(buf, tmp_buf);
+ }
+
+ /* Simple peek: get everything. */
+ i = evbuffer_peek(buf, -1, NULL, v, 20);
+ tt_int_op(i, ==, 16); /* we used only 16 chunks. */
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_iov_eq(&v[3], "Contents of chunk [3]\n");
+ tt_iov_eq(&v[12], "Contents of chunk [12]\n");
+ tt_iov_eq(&v[15], "Contents of chunk [15]\n");
+
+ /* Just get one chunk worth. */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, -1, NULL, v, 1);
+ tt_int_op(i, ==, 1);
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_assert(v[1].iov_base == NULL);
+
+ /* Suppose we want at least the first 40 bytes. */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 40, NULL, v, 16);
+ tt_int_op(i, ==, 2);
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_iov_eq(&v[1], "Contents of chunk [1]\n");
+ tt_assert(v[2].iov_base == NULL);
+
+ /* How many chunks do we need for 100 bytes? */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 100, NULL, NULL, 0);
+ tt_int_op(i, ==, 5);
+ tt_assert(v[0].iov_base == NULL);
+
+ /* Now we ask for more bytes than we provide chunks for */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 60, NULL, v, 1);
+ tt_int_op(i, ==, 3);
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_assert(v[1].iov_base == NULL);
+
+ /* Now we ask for more bytes than the buffer has. */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(buf, 65536, NULL, v, 20);
+ tt_int_op(i, ==, 16); /* we used only 16 chunks. */
+ tt_iov_eq(&v[0], "Contents of chunk [0]\n");
+ tt_iov_eq(&v[3], "Contents of chunk [3]\n");
+ tt_iov_eq(&v[12], "Contents of chunk [12]\n");
+ tt_iov_eq(&v[15], "Contents of chunk [15]\n");
+ tt_assert(v[16].iov_base == NULL);
+
+ /* What happens if we try an empty buffer? */
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(tmp_buf, -1, NULL, v, 20);
+ tt_int_op(i, ==, 0);
+ tt_assert(v[0].iov_base == NULL);
+ memset(v, 0, sizeof(v));
+ i = evbuffer_peek(tmp_buf, 50, NULL, v, 20);
+ tt_int_op(i, ==, 0);
+ tt_assert(v[0].iov_base == NULL);
+
+ /* Okay, now time to have fun with pointers. */
+ memset(v, 0, sizeof(v));
+ evbuffer_ptr_set(buf, &ptr, 30, EVBUFFER_PTR_SET);
+ i = evbuffer_peek(buf, 50, &ptr, v, 20);
+ tt_int_op(i, ==, 3);
+ tt_iov_eq(&v[0], " of chunk [1]\n");
+ tt_iov_eq(&v[1], "Contents of chunk [2]\n");
+ tt_iov_eq(&v[2], "Contents of chunk [3]\n"); /*more than we asked for*/
+
+ /* advance to the start of another chain. */
+ memset(v, 0, sizeof(v));
+ evbuffer_ptr_set(buf, &ptr, 14, EVBUFFER_PTR_ADD);
+ i = evbuffer_peek(buf, 44, &ptr, v, 20);
+ tt_int_op(i, ==, 2);
+ tt_iov_eq(&v[0], "Contents of chunk [2]\n");
+ tt_iov_eq(&v[1], "Contents of chunk [3]\n"); /*more than we asked for*/
+
+end:
+ if (buf)
+ evbuffer_free(buf);
+ if (tmp_buf)
+ evbuffer_free(tmp_buf);
+}
+
/* Check whether evbuffer freezing works right. This is called twice,
once with the argument "start" and once with the argument "end".
When we test "start", we freeze the start of an evbuffer and make sure
char charbuf[128];
int r;
size_t orig_length;
+ struct evbuffer_iovec v[1];
if (!start)
tt_str_op(ptr, ==, "end");
/* These functions all manipulate the end of buf. */
r = evbuffer_add(buf, "abc", 0);
FREEZE_EQ(r, 0, -1);
- cp = (char*)evbuffer_reserve_space(buf, 10);
- FREEZE_EQ(cp==NULL, 0, 1);
- if (cp)
- memset(cp, 'X', 10);
- r = evbuffer_commit_space(buf, 10);
+ r = evbuffer_reserve_space(buf, 10, v, 1);
+ FREEZE_EQ(r, 1, -1);
+ if (r == 1) {
+ memset(v[0].iov_base, 'X', 10);
+ v[0].iov_len = 10;
+ }
+ r = evbuffer_commit_space(buf, v, 1);
FREEZE_EQ(r, 0, -1);
r = evbuffer_add_reference(buf, string, 5, NULL, NULL);
FREEZE_EQ(r, 0, -1);
struct testcase_t evbuffer_testcases[] = {
{ "evbuffer", test_evbuffer, 0, NULL, NULL },
+ { "reserve2", test_evbuffer_reserve2, 0, NULL, NULL },
{ "reference", test_evbuffer_reference, 0, NULL, NULL },
{ "iterative", test_evbuffer_iterative, 0, NULL, NULL },
{ "readln", test_evbuffer_readln, 0, NULL, NULL },
{ "callbacks", test_evbuffer_callbacks, 0, NULL, NULL },
{ "add_reference", test_evbuffer_add_reference, 0, NULL, NULL },
{ "prepend", test_evbuffer_prepend, 0, NULL, NULL },
+ { "peek", test_evbuffer_peek, 0, NULL, NULL },
{ "freeze_start", test_evbuffer_freeze, 0, &nil_setup, (void*)"start" },
{ "freeze_end", test_evbuffer_freeze, 0, &nil_setup, (void*)"end" },
#ifndef WIN32
zlib_input_filter(struct evbuffer *src, struct evbuffer *dst,
ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
{
+ struct evbuffer_iovec v_in[1];
+ struct evbuffer_iovec v_out[1];
int nread, nwrite;
- int res;
+ int res, n;
z_streamp p = ctx;
do {
/* let's do some decompression */
- p->avail_in = evbuffer_get_contiguous_space(src);
- p->next_in = evbuffer_pullup(src, p->avail_in);
+ n = evbuffer_peek(src, -1, NULL, v_in, 1);
+ if (n) {
+ p->avail_in = v_in[0].iov_len;
+ p->next_in = v_in[0].iov_base;
+ } else {
+ p->avail_in = 0;
+ p->next_in = 0;
+ }
- p->next_out = evbuffer_reserve_space(dst, 4096);
- p->avail_out = 4096;
+ evbuffer_reserve_space(dst, 4096, v_out, 1);
+ p->next_out = v_out[0].iov_base;
+ p->avail_out = v_out[0].iov_len;
/* we need to flush zlib if we got a flush */
res = inflate(p, getstate(state));
/* let's figure out how much was compressed */
- nread = evbuffer_get_contiguous_space(src) - p->avail_in;
- nwrite = 4096 - p->avail_out;
+ nread = n ? v_in[0].iov_len - p->avail_in : 0;
+ nwrite = v_out[0].iov_len - p->avail_out;
evbuffer_drain(src, nread);
- evbuffer_commit_space(dst, nwrite);
+ v_out[0].iov_len = nwrite;
+ evbuffer_commit_space(dst, v_out, 1);
if (res==Z_BUF_ERROR) {
/* We're out of space, or out of decodeable input.
zlib_output_filter(struct evbuffer *src, struct evbuffer *dst,
ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
{
+ struct evbuffer_iovec v_in[1];
+ struct evbuffer_iovec v_out[1];
int nread, nwrite;
- int res;
+ int res, n;
z_streamp p = ctx;
do {
/* let's do some compression */
- p->avail_in = evbuffer_get_contiguous_space(src);
- p->next_in = evbuffer_pullup(src, p->avail_in);
-
- p->next_out = evbuffer_reserve_space(dst, 4096);
- p->avail_out = 4096;
+ n = evbuffer_peek(src, -1, NULL, v_in, 1);
+ if (n) {
+ p->avail_in = v_in[0].iov_len;
+ p->next_in = v_in[0].iov_base;
+ } else {
+ p->avail_in = 0;
+ p->next_in = 0;
+ }
+ evbuffer_reserve_space(dst, 4096, v_out, 1);
+ p->next_out = v_out[0].iov_base;
+ p->avail_out = v_out[0].iov_len;
/* we need to flush zlib if we got a flush */
res = deflate(p, getstate(state));
- /* let's figure out how much was compressed */
- nread = evbuffer_get_contiguous_space(src) - p->avail_in;
- nwrite = 4096 - p->avail_out;
+ /* let's figure out how much was compressed */
+ nread = n ? v_in[0].iov_len - p->avail_in : 0;
+ nwrite = v_out[0].iov_len - p->avail_out;
evbuffer_drain(src, nread);
- evbuffer_commit_space(dst, nwrite);
+ v_out[0].iov_len = nwrite;
+ evbuffer_commit_space(dst, v_out, 1);
if (res==Z_BUF_ERROR) {
/* We're out of space, or out of decodeable input.