Read the ipset(8), iptables(8) and ip6tables(8) manpages on how to use
ipset and its match and target from iptables.
+
+Compatibilities:
+
+- The ipset 5.x userspace utility contains a backward compatibility
+ interface to support the syntax of ipset 4.x.
+- The ipset 5.x userspace utility can't talk to the kernel part of ipset 4.x.
+- The ipset 5.x kernel part can't talk to the userspace utility from
+ ipset 4.x.
+- The ipset 5.x kernel part can work together with the set match and SET
+ target from iptables 1.4.7 and below; however, if you need the IPv6
+ support from ipset 5.x, then you have to use iptables 1.4.8 or above.
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
};
-/* Flags for the set type variants */
-enum ip_set_type_flags {
- /* Set members created by kmalloc */
- IP_SET_FLAG_KMALLOC_BIT = 0,
- IP_SET_FLAG_KMALLOC = (1 << IP_SET_FLAG_KMALLOC_BIT),
-};
-
/* The core set type structure */
struct ip_set_type {
struct list_head list;
const struct ip_set_type_variant *variant;
/* The actual INET family */
u8 family;
- /* Set type flags, filled/modified by create/resize */
- u8 flags;
/* The type specific data */
void *data;
};
/* Allocate members */
static inline void *
-ip_set_alloc(size_t size, gfp_t gfp_mask, u8 *flags)
+ip_set_alloc(size_t size, gfp_t gfp_mask) /* flags out-param dropped: the allocator is recoverable from the address, see ip_set_free() */
{
- void *members = kzalloc(size, gfp_mask | __GFP_NOWARN);
+ void *members = NULL; /* stays NULL when the request is too large for kmalloc */
+
+ if (size < KMALLOC_MAX_SIZE) /* oversize requests skip kzalloc and fall through to vmalloc */
+ members = kzalloc(size, gfp_mask | __GFP_NOWARN);
if (members) {
- *flags |= IP_SET_FLAG_KMALLOC;
pr_debug("%p: allocated with kmalloc", members);
return members;
}
members = __vmalloc(size, gfp_mask | __GFP_ZERO, PAGE_KERNEL);
if (!members)
return NULL;
pr_debug("%p: allocated with vmalloc", members);
return members;
}
static inline void
-ip_set_free(void *members, u8 flags)
+ip_set_free(void *members) /* no flags needed: is_vmalloc_addr() identifies the allocator */
{
pr_debug("%p: free with %s", members,
- flags & IP_SET_FLAG_KMALLOC ? "kmalloc" : "vmalloc");
- if (flags & IP_SET_FLAG_KMALLOC)
- kfree(members);
- else
+ is_vmalloc_addr(members) ? "vfree" : "kfree");
+ if (is_vmalloc_addr(members)) /* address range tells vmalloc apart from kmalloc */
vfree(members);
+ else
+ kfree(members);
}
static inline bool
}
static void
-chash_destroy(struct slist *t, u8 htable_bits, u8 flags)
+chash_destroy(struct slist *t, u8 htable_bits)
{
struct slist *n, *tmp;
u32 i;
/* FIXME: slab cache */
kfree(n);
- ip_set_free(t, flags);
+ ip_set_free(t);
}
static size_t
if (with_timeout(h->timeout))
del_timer_sync(&h->gc);
- chash_destroy(h->htable, h->htable_bits, set->flags);
+ chash_destroy(h->htable, h->htable_bits);
kfree(h);
set->data = NULL;
struct slist *t, *n;
const struct type_pf_elem *data;
u32 i, j;
- u8 oflags, flags;
int ret;
retry:
/* In case we have plenty of memory :-) */
return -IPSET_ERR_HASH_FULL;
t = ip_set_alloc(jhash_size(htable_bits) * sizeof(struct slist),
- gfp_flags, &flags);
+ gfp_flags);
if (!t)
return -ENOMEM;
write_lock_bh(&set->lock);
- flags = oflags = set->flags;
for (i = 0; i < jhash_size(h->htable_bits); i++) {
next_slot:
slist_for_each(n, &h->htable[i]) {
data, gfp_flags);
if (ret < 0) {
write_unlock_bh(&set->lock);
- chash_destroy(t, htable_bits, flags);
+ chash_destroy(t, htable_bits);
if (ret == -EAGAIN)
goto retry;
return ret;
h->htable = t;
h->htable_bits = htable_bits;
- set->flags = flags;
write_unlock_bh(&set->lock);
- chash_destroy(n, i, oflags);
+ chash_destroy(n, i);
return 0;
}
struct slist *t, *n;
const struct type_pf_elem *data;
u32 i, j;
- u8 oflags, flags;
int ret;
/* Try to cleanup once */
/* In case we have plenty of memory :-) */
return -IPSET_ERR_HASH_FULL;
t = ip_set_alloc(jhash_size(htable_bits) * sizeof(struct slist),
- gfp_flags, &flags);
+ gfp_flags);
if (!t)
return -ENOMEM;
write_lock_bh(&set->lock);
- flags = oflags = set->flags;
for (i = 0; i < jhash_size(h->htable_bits); i++) {
next_slot:
slist_for_each(n, &h->htable[i]) {
type_pf_data_timeout(data));
if (ret < 0) {
write_unlock_bh(&set->lock);
- chash_destroy(t, htable_bits, flags);
+ chash_destroy(t, htable_bits);
if (ret == -EAGAIN)
goto retry;
return ret;
h->htable = t;
h->htable_bits = htable_bits;
- set->flags = flags;
write_unlock_bh(&set->lock);
- chash_destroy(n, i, oflags);
+ chash_destroy(n, i);
return 0;
}
{
struct bitmap_ip *map = set->data;
- ip_set_free(map->members, set->flags);
+ ip_set_free(map->members);
kfree(map);
set->data = NULL;
struct bitmap_ip_timeout *map = set->data;
del_timer_sync(&map->gc);
- ip_set_free(map->members, set->flags);
+ ip_set_free(map->members);
kfree(map);
set->data = NULL;
u32 first_ip, u32 last_ip,
u32 elements, u32 hosts, u8 netmask)
{
- map->members = ip_set_alloc(map->memsize, GFP_KERNEL, &set->flags);
+ map->members = ip_set_alloc(map->memsize, GFP_KERNEL);
if (!map->members)
return false;
map->first_ip = first_ip;
if (with_timeout(map->timeout))
del_timer_sync(&map->gc);
- ip_set_free(map->members, set->flags);
+ ip_set_free(map->members);
kfree(map);
set->data = NULL;
u32 first_ip, u32 last_ip)
{
map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize,
- GFP_KERNEL, &set->flags);
+ GFP_KERNEL);
if (!map->members)
return false;
map->first_ip = first_ip;
{
struct bitmap_port *map = set->data;
- ip_set_free(map->members, set->flags);
+ ip_set_free(map->members);
kfree(map);
set->data = NULL;
struct bitmap_port_timeout *map = set->data;
del_timer_sync(&map->gc);
- ip_set_free(map->members, set->flags);
+ ip_set_free(map->members);
kfree(map);
set->data = NULL;
init_map_port(struct ip_set *set, struct bitmap_port *map,
u16 first_port, u16 last_port)
{
- map->members = ip_set_alloc(map->memsize, GFP_KERNEL, &set->flags);
+ map->members = ip_set_alloc(map->memsize, GFP_KERNEL);
if (!map->members)
return false;
map->first_port = first_port;
{
struct chash *x = a->data;
struct chash *y = b->data;
-
+
+ /* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
&& x->netmask == y->netmask
- && x->htable_bits == y->htable_bits /* resizing ? */
&& x->array_size == y->array_size
&& x->chain_limit == y->chain_limit;
}
h->timeout = IPSET_NO_TIMEOUT;
h->htable = ip_set_alloc(jhash_size(h->htable_bits) * sizeof(struct slist),
- GFP_KERNEL, &set->flags);
+ GFP_KERNEL);
if (!h->htable) {
kfree(h);
return -ENOMEM;
struct chash *x = a->data;
struct chash *y = b->data;
+ /* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
&& x->proto == y->proto
- && x->htable_bits == y->htable_bits /* resizing ? */
&& x->array_size == y->array_size
&& x->chain_limit == y->chain_limit;
}
h->timeout = IPSET_NO_TIMEOUT;
h->htable = ip_set_alloc(jhash_size(h->htable_bits) * sizeof(struct slist),
- GFP_KERNEL, &set->flags);
+ GFP_KERNEL);
if (!h->htable) {
kfree(h);
return -ENOMEM;
struct chash *x = a->data;
struct chash *y = b->data;
+ /* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
&& x->proto == y->proto
- && x->htable_bits == y->htable_bits /* resizing ? */
&& x->array_size == y->array_size
&& x->chain_limit == y->chain_limit;
}
h->timeout = IPSET_NO_TIMEOUT;
h->htable = ip_set_alloc(jhash_size(h->htable_bits) * sizeof(struct slist),
- GFP_KERNEL, &set->flags);
+ GFP_KERNEL);
if (!h->htable) {
kfree(h);
return -ENOMEM;
struct chash *x = a->data;
struct chash *y = b->data;
+ /* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
&& x->proto == y->proto
- && x->htable_bits == y->htable_bits /* resizing ? */
&& x->array_size == y->array_size
&& x->chain_limit == y->chain_limit;
}
h->timeout = IPSET_NO_TIMEOUT;
h->htable = ip_set_alloc(jhash_size(h->htable_bits) * sizeof(struct slist),
- GFP_KERNEL, &set->flags);
+ GFP_KERNEL);
if (!h->htable) {
kfree(h);
return -ENOMEM;
struct chash *x = a->data;
struct chash *y = b->data;
+ /* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
- && x->htable_bits == y->htable_bits /* resizing ? */
&& x->array_size == y->array_size
&& x->chain_limit == y->chain_limit;
}
h->timeout = IPSET_NO_TIMEOUT;
h->htable = ip_set_alloc(jhash_size(h->htable_bits) * sizeof(struct slist),
- GFP_KERNEL, &set->flags);
+ GFP_KERNEL);
if (!h->htable) {
kfree(h);
return -ENOMEM;
#error "Linux kernel version too old: must be >= 2.6.31"
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#define CHECK_OK 1 /* pre-2.6.35 checkentry returns bool: nonzero accepts the rule */
+#define CHECK_FAIL 0 /* zero rejects it */
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
+#define CHECK_OK 0 /* 2.6.35+ checkentry returns int: 0 accepts the rule */
+#define CHECK_FAIL -EINVAL /* negative errno rejects it */
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
static bool
set_match_v0(const struct sk_buff *skb, const struct xt_match_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find set indentified by id %u to match",
info->match_set.index);
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("That's nasty!");
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
/* Fill out compatibility data */
compat_flags(&info->match_set);
- return 1;
+ return CHECK_OK;
}
static void
if (index == IPSET_INVALID_ID) {
pr_warning("cannot find add_set index %u as target",
info->add_set.index);
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
}
if (index == IPSET_INVALID_ID) {
pr_warning("cannot find del_set index %u as target",
info->del_set.index);
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
}
if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0
|| info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("That's nasty!");
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
/* Fill out compatibility data */
compat_flags(&info->add_set);
compat_flags(&info->del_set);
- return 1;
+ return CHECK_OK;
}
static void
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find set indentified by id %u to match",
info->match_set.index);
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
if (info->match_set.dim > IPSET_DIM_MAX) {
pr_warning("That's nasty!");
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
- return 1;
+ return CHECK_OK;
}
static void
if (index == IPSET_INVALID_ID) {
pr_warning("cannot find add_set index %u as target",
info->add_set.index);
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
}
if (index == IPSET_INVALID_ID) {
pr_warning("cannot find del_set index %u as target",
info->del_set.index);
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
}
if (info->add_set.dim > IPSET_DIM_MAX
|| info->del_set.flags > IPSET_DIM_MAX) {
pr_warning("That's nasty!");
- return 0; /* error */
+ return CHECK_FAIL; /* error */
}
- return 1;
+ return CHECK_OK;
}
static void