}
struct ev_token_bucket_cfg *
-ev_token_bucket_cfg_new(ev_uint32_t read_rate, ev_uint32_t read_burst,
- ev_uint32_t write_rate, ev_uint32_t write_burst,
+ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst,
+ size_t write_rate, size_t write_burst,
const struct timeval *tick_len)
{
struct ev_token_bucket_cfg *r;
if (read_rate > read_burst || write_rate > write_burst ||
read_rate < 1 || write_rate < 1)
return NULL;
+ if (read_rate > EV_RATE_LIMIT_MAX ||
+ write_rate > EV_RATE_LIMIT_MAX ||
+ read_burst > EV_RATE_LIMIT_MAX ||
+ write_burst > EV_RATE_LIMIT_MAX)
+ return NULL;
r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg));
if (!r)
return NULL;
if (bev->rate_limiting->group) {
struct bufferevent_rate_limit_group *g =
bev->rate_limiting->group;
- ev_int32_t share;
+ ev_ssize_t share;
LOCK_GROUP(g);
if (GROUP_SUSPENDED(g)) {
/* We can get here if we failed to lock this
&g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
- if (g->rate_limit.read_limit > (ev_int32_t)cfg->read_maximum)
+ if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum)
g->rate_limit.read_limit = cfg->read_maximum;
- if (g->rate_limit.write_limit > (ev_int32_t)cfg->write_maximum)
+ if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum)
g->rate_limit.write_limit = cfg->write_maximum;
if (!same_tick) {
struct bufferevent_rate_limit_group *g,
size_t share)
{
- if (share > EV_INT32_MAX)
+ if (share > EV_SSIZE_MAX)
return -1;
g->min_share = share;
bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr)
{
int r = 0;
- ev_int32_t old_limit, new_limit;
+ ev_ssize_t old_limit, new_limit;
struct bufferevent_private *bevp;
BEV_LOCK(bev);
bevp = BEV_UPCAST(bev);
/* XXXX this is mostly copy-and-paste from
* bufferevent_decrement_read_limit */
int r = 0;
- ev_int32_t old_limit, new_limit;
+ ev_ssize_t old_limit, new_limit;
struct bufferevent_private *bevp;
BEV_LOCK(bev);
bevp = BEV_UPCAST(bev);
struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
{
int r = 0;
- ev_int32_t old_limit, new_limit;
+ ev_ssize_t old_limit, new_limit;
LOCK_GROUP(grp);
old_limit = grp->rate_limit.read_limit;
new_limit = (grp->rate_limit.read_limit -= decr);
struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
{
int r = 0;
- ev_int32_t old_limit, new_limit;
+ ev_ssize_t old_limit, new_limit;
LOCK_GROUP(grp);
old_limit = grp->rate_limit.write_limit;
new_limit = (grp->rate_limit.write_limit -= decr);
*/
struct bufferevent_rate_limit_group;
+/** Maximum configurable rate- or burst-limit. */
+#define EV_RATE_LIMIT_MAX EV_SSIZE_MAX
+
/**
Initialize and return a new object to configure the rate-limiting behavior
of bufferevents.
of Libevent may implement them more tightly.
*/
struct ev_token_bucket_cfg *ev_token_bucket_cfg_new(
- ev_uint32_t read_rate, ev_uint32_t read_burst,
- ev_uint32_t write_rate, ev_uint32_t write_burst,
+ size_t read_rate, size_t read_burst,
+ size_t write_rate, size_t write_burst,
const struct timeval *tick_len);
/** Free all storage held in 'cfg'.
struct ev_token_bucket {
	/** How many bytes are we willing to read or write right now? These
	 * values are signed so that we can do "deficit spending" */
- ev_int32_t read_limit, write_limit;
+ ev_ssize_t read_limit, write_limit;
/** When was this bucket last updated? Measured in abstract 'ticks'
* relative to the token bucket configuration. */
ev_uint32_t last_updated;
/** Configuration info for a token bucket or set of token buckets. */
struct ev_token_bucket_cfg {
/** How many bytes are we willing to read on average per tick? */
- ev_uint32_t read_rate;
+ size_t read_rate;
/** How many bytes are we willing to read at most in any one tick? */
- ev_uint32_t read_maximum;
+ size_t read_maximum;
/** How many bytes are we willing to write on average per tick? */
- ev_uint32_t write_rate;
+ size_t write_rate;
/** How many bytes are we willing to write at most in any one tick? */
- ev_uint32_t write_maximum;
+ size_t write_maximum;
/* How long is a tick? Note that fractions of a millisecond are
* ignored. */