*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation.
*/
/* The protocol version */
IPSET_MSG_MAX, /* Netlink message commands */
/* Commands in userspace: */
- IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 15: Enter restore mode */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 15: Enter restore mode */
IPSET_CMD_HELP, /* 16: Get help */
IPSET_CMD_VERSION, /* 17: Get program version */
IPSET_CMD_QUIT, /* 18: Quit from interactive mode */
IPSET_ATTR_ELEMENTS,
IPSET_ATTR_REFERENCES,
IPSET_ATTR_MEMSIZE,
-
+
__IPSET_ATTR_CREATE_MAX,
};
#define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1)
ip_set_alloc(size_t size, gfp_t gfp_mask)
{
void *members = NULL;
-
+
if (size < KMALLOC_MAX_SIZE)
members = kzalloc(size, gfp_mask | __GFP_NOWARN);
-
+
if (members) {
pr_debug("%p: allocated with kmalloc", members);
return members;
}
-
+
members = __vmalloc(size, gfp_mask | __GFP_ZERO, PAGE_KERNEL);
if (!members)
return NULL;
pr_debug("%p: allocated with vmalloc", members);
-
+
return members;
}
ip_set_get_h32(const struct nlattr *attr)
{
u32 value = nla_get_u32(attr);
-
+
return attr->nla_type & NLA_F_NET_BYTEORDER ? ntohl(value) : value;
}
ip_set_get_h16(const struct nlattr *attr)
{
u16 value = nla_get_u16(attr);
-
+
return attr->nla_type & NLA_F_NET_BYTEORDER ? ntohs(value) : value;
}
ip_set_get_n32(const struct nlattr *attr)
{
u32 value = nla_get_u32(attr);
-
+
return attr->nla_type & NLA_F_NET_BYTEORDER ? value : htonl(value);
}
ip_set_get_n16(const struct nlattr *attr)
{
u16 value = nla_get_u16(attr);
-
+
return attr->nla_type & NLA_F_NET_BYTEORDER ? value : htons(value);
}
return -IPSET_ERR_PROTOCOL;
if (!tb[IPSET_ATTR_IPADDR_IPV4])
return -IPSET_ERR_IPADDR_IPV4;
-
+
*ipaddr = ip_set_get_n32(tb[IPSET_ATTR_IPADDR_IPV4]);
return 0;
}
}
#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
-#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
+#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
#define NLA_PUT_NET32(skb, type, value) \
NLA_PUT_BE32(skb, type | NLA_F_NET_BYTEORDER, value)
/* Interface to iptables/ip6tables */
-#define SO_IP_SET 83
+#define SO_IP_SET 83
union ip_set_name_index {
char name[IPSET_MAXNAMELEN];
range_to_mask(u32 from, u32 to, u8 *bits)
{
u32 mask = 0xFFFFFFFE;
-
+
*bits = 32;
while (--(*bits) > 0 && mask && (to & mask) != from)
mask <<= 1;
-
+
return mask;
}
#endif /* __KERNEL__ */
-
+
#endif /* __IP_SET_BITMAP_H */
* the timeout field must be the last one in the data structure - that field
* is ignored when computing the hash key.
*/
-
+
/* Number of elements to store in an array block */
#define CHASH_DEFAULT_ARRAY_SIZE 4
/* Number of arrays: max ARRAY_SIZE * CHAIN_LIMIT "long" chains */
if (h->nets[cidr-1].nets > 1)
return;
-
+
/* New cidr size */
for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
/* Add in increasing prefix order, so larger cidr first */
{
struct slist *n, *tmp;
u32 i;
-
+
for (i = 0; i < jhash_size(ht->htable_bits); i++)
slist_for_each_safe(n, tmp, &ht->htable[i])
/* FIXME: use slab cache */
+ sizeof(struct chash_nets) * host_mask
#endif
+ jhash_size(ht->htable_bits) * sizeof(struct slist);
-
+
for (i = 0; i < jhash_size(ht->htable_bits); i++)
slist_for_each(n, &ht->htable[i])
memsize += sizeof(struct slist)
+ h->array_size * dsize;
-
+
return memsize;
}
struct htable *ht = h->table;
struct slist *n, *tmp;
u32 i;
-
+
for (i = 0; i < jhash_size(ht->htable_bits); i++) {
slist_for_each_safe(n, tmp, &ht->htable[i])
/* FIXME: slab cache */
chash_destroy(h->table);
kfree(h);
-
+
set->data = NULL;
}
/* Get the ith element from the array block n */
#define chash_data(n, i) \
-(struct type_pf_elem *)((char *)(n) + sizeof(struct slist) \
+(struct type_pf_elem *)((char *)(n) + sizeof(struct slist) \
+ (i)*sizeof(struct type_pf_elem))
/* Add an element to the hash table when resizing the set:
for (prev = n, tmp = n->next;
tmp->next != NULL;
prev = tmp, tmp = tmp->next)
- /* Find last array */;
+ /* Find last array */;
j = 0;
} else {
/* Already at last array */
if (type_pf_data_isnull(chash_data(tmp, j + 1)))
break;
- if (!(tmp == n && i == j)) {
+ if (!(tmp == n && i == j))
type_pf_data_swap(data, chash_data(tmp, j));
- }
+
#ifdef IP_SET_HASH_WITH_NETS
del_cidr(h, data->cidr, HOST_MASK);
#endif
h->table = ht;
read_unlock_bh(&set->lock);
-
+
/* Give time to other users of the set */
synchronize_net();
chash_destroy(orig);
-
+
return 0;
}
const struct chash *h = set->data;
struct nlattr *nested;
size_t memsize;
-
+
read_lock_bh(&set->lock);
memsize = chash_memsize(h, with_timeout(h->timeout)
? sizeof(struct type_pf_telem)
if (with_timeout(h->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
for (i = 0; i < h->array_size; i++) {
data = chash_tdata(n, i);
if (type_pf_data_isnull(data)) {
- tmp = n;
- goto found;
+ tmp = n;
+ goto found;
}
}
j++;
static void
type_pf_chash_del_telem(struct chash *h, struct slist *prev,
- struct slist *n, int i)
+ struct slist *n, int i)
{
struct type_pf_elem *d, *data = chash_tdata(n, i);
struct slist *tmp;
for (prev = n, tmp = n->next;
tmp->next != NULL;
prev = tmp, tmp = tmp->next)
- /* Find last array */;
+ /* Find last array */;
j = 0;
} else {
/* Already at last array */
struct type_pf_elem *data;
u32 i;
int j;
-
+
for (i = 0; i < jhash_size(ht->htable_bits); i++)
slist_for_each_prev(prev, n, &ht->htable[i])
for (j = 0; j < h->array_size; j++) {
synchronize_net();
chash_destroy(orig);
-
+
return 0;
}
static int
type_pf_chash_tadd(struct ip_set *set, void *value,
gfp_t gfp_flags, u32 timeout)
-{
+{
struct chash *h = set->data;
const struct type_pf_elem *d = value;
struct slist *n, *prev;
data = chash_tdata(n, i);
if (type_pf_data_isnull(data)
|| type_pf_data_expired(data)) {
- tmp = n;
- goto found;
+ tmp = n;
+ goto found;
}
if (type_pf_data_equal(data, d))
return -IPSET_ERR_EXIST;
}
found:
if (type_pf_data_isnull(data))
- h->elements++;
+ h->elements++;
#ifdef IP_SET_HASH_WITH_NETS
else
del_cidr(h, data->cidr, HOST_MASK);
return -IPSET_ERR_EXIST;
if (type_pf_data_equal(data, d)) {
if (type_pf_data_expired(data))
- ret = -IPSET_ERR_EXIST;
+ ret = -IPSET_ERR_EXIST;
type_pf_chash_del_telem(h, prev, n, i);
return ret;
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
type_pf_gc_init(struct ip_set *set)
{
struct chash *h = set->data;
-
+
init_timer(&h->gc);
h->gc.data = (unsigned long) set;
h->gc.function = type_pf_gc;
case IPPROTO_TCP: {
struct tcphdr _tcph;
const struct tcphdr *th;
-
+
th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
if (th == NULL)
/* No choice either */
return false;
-
- *port = src ? th->source : th->dest;
- break;
+
+ *port = src ? th->source : th->dest;
+ break;
}
case IPPROTO_UDP: {
struct udphdr _udph;
if (uh == NULL)
/* No choice either */
return false;
-
- *port = src ? uh->source : uh->dest;
- break;
+
+ *port = src ? uh->source : uh->dest;
+ break;
}
case IPPROTO_ICMP: {
struct icmphdr _icmph;
const struct icmphdr *ic;
-
+
ic = skb_header_pointer(skb, protooff, sizeof(_icmph), &_icmph);
if (ic == NULL)
return false;
-
+
*port = (ic->type << 8) & ic->code;
break;
}
case IPPROTO_ICMPV6: {
struct icmp6hdr _icmph;
const struct icmp6hdr *ic;
-
+
ic = skb_header_pointer(skb, protooff, sizeof(_icmph), &_icmph);
if (ic == NULL)
return false;
-
+
*port = (ic->icmp6_type << 8) & ic->icmp6_code;
break;
}
#define IPSET_DEFAULT_RESIZE 100
#endif /* __KERNEL__ */
-
+
#endif /* __IP_SET_HASH_H */
#define IP_SET_LIST_MIN_SIZE 4
#endif /* __KERNEL__ */
-
+
#endif /* __IP_SET_LIST_H */
/*
* Single linked lists with a single pointer.
- * Mostly useful for hash tables where the two pointer list head
+ * Mostly useful for hash tables where the two pointer list head
* and list node is too wasteful.
*/
#define SLIST(name) struct slist name = { .next = NULL }
#define INIT_SLIST(ptr) ((ptr)->next = NULL)
-#define slist_entry(ptr, type, member) container_of(ptr,type,member)
+#define slist_entry(ptr, type, member) container_of(ptr, type, member)
#define slist_for_each(pos, head) \
for (pos = (head)->next; pos && ({ prefetch(pos->next); 1; }); \
*/
#define slist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = slist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = slist_entry(pos, typeof(*tpos), member); 1; });\
pos = pos->next)
/**
*/
#define slist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = slist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = slist_entry(pos, typeof(*tpos), member); 1; });\
pos = pos->next)
/**
* @member: the name of the slist within the struct.
*/
#define slist_for_each_entry_from(tpos, pos, member) \
- for (; pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = slist_entry(pos, typeof(*tpos), member); 1;}); \
+ for (; pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = slist_entry(pos, typeof(*tpos), member); 1; });\
pos = pos->next)
/**
* @head: the head for your list.
* @member: the name of the slist within the struct.
*/
-#define slist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->next; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = slist_entry(pos, typeof(*tpos), member); 1;}); \
+#define slist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->next; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = slist_entry(pos, typeof(*tpos), member); 1; });\
pos = n)
#endif /* _IP_SET_SLIST_H */
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * published by the Free Software Foundation.
*/
#ifdef __KERNEL__
/* Bitmap entry is unset */
#define IPSET_ELEM_UNSET 0
/* Bitmap entry is set with no timeout value */
-#define IPSET_ELEM_PERMANENT UINT_MAX/2
+#define IPSET_ELEM_PERMANENT (UINT_MAX/2)
static inline bool
ip_set_timeout_test(unsigned long timeout)
{
return timeout != IPSET_ELEM_UNSET
- && (timeout == IPSET_ELEM_PERMANENT
- || time_after(timeout, jiffies));
+ && (timeout == IPSET_ELEM_PERMANENT
+ || time_after(timeout, jiffies));
}
static inline bool
ip_set_timeout_set(u32 timeout)
{
unsigned long t;
-
+
if (!timeout)
return IPSET_ELEM_PERMANENT;
-
+
t = timeout * HZ + jiffies;
if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
/* Bingo! */
t++;
-
+
return t;
}
ip_set_timeout_set(u32 timeout)
{
unsigned long t;
-
+
if (!timeout)
return IPSET_ELEM_PERMANENT;
-
+
t = timeout * HZ + jiffies;
if (t == IPSET_ELEM_PERMANENT)
/* Bingo! :-) */
t++;
-
+
return t;
}
{.ip6 = { \
__constant_htonl(a), __constant_htonl(b), \
__constant_htonl(c), __constant_htonl(d), \
- }}
+ } }
/*
* This table works for both IPv4 and IPv6;
static struct ip_set **ip_set_list; /* all individual sets */
static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
-#define STREQ(a,b) (strncmp(a,b,IPSET_MAXNAMELEN) == 0)
+#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
static int max_sets;
find_set_type_rcu(const char *name, u8 family, u8 revision)
{
struct ip_set_type *type;
-
+
rcu_read_lock();
type = find_set_type(name, family, revision);
if (type == NULL)
}
/* Find a given set type by name and family.
- * If we succeeded, the supported minimal and maximum revisions are
+ * If we succeeded, the supported minimal and maximum revisions are
* filled out.
*/
static bool
{
struct ip_set_type *type;
bool ret = false;
-
+
*min = *max = 0;
rcu_read_lock();
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name)
&& (type->family == family || type->family == AF_UNSPEC)) {
- ret = true;
- if (type->revision < *min)
- *min = type->revision;
+ ret = true;
+ if (type->revision < *min)
+ *min = type->revision;
else if (type->revision > *max)
*max = type->revision;
}
ip_set_type_register(struct ip_set_type *type)
{
int ret = 0;
-
+
if (type->protocol != IPSET_PROTOCOL) {
pr_warning("ip_set type %s, family %s, revision %u uses "
"wrong protocol version %u (want %u)\n",
* the properties of a set. All of these can be executed from userspace
* only and serialized by the nfnl mutex indirectly from nfnetlink.
*
- * Sets are identified by their index in ip_set_list and the index
+ * Sets are identified by their index in ip_set_list and the index
* is used by the external references (set/SET netfilter modules).
*
* The set behind an index may change by swapping only, from userspace.
atomic_dec(&ip_set_list[index]->ref);
}
-/*
+/*
* Add, del and test set entries from kernel.
*
* The set behind the index must exist and must be referenced
if (dim < set->type->dimension
|| !(family == set->family || set->family == AF_UNSPEC))
- return 0;
+ return 0;
read_lock_bh(&set->lock);
ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
if (dim < set->type->dimension
|| !(family == set->family || set->family == AF_UNSPEC))
- return 0;
+ return 0;
write_lock_bh(&set->lock);
ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
if (dim < set->type->dimension
|| !(family == set->family || set->family == AF_UNSPEC))
- return 0;
+ return 0;
write_lock_bh(&set->lock);
ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
write_unlock_bh(&set->lock);
-
+
return ret;
}
EXPORT_SYMBOL(ip_set_del);
BUG_ON(atomic_read(&set->ref) == 0);
/* Referenced, so it's safe */
- return set->name;
+ return set->name;
}
EXPORT_SYMBOL(ip_set_name_byindex);
nfmsg->nfgen_family = AF_INET;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
-
+
return nlh;
}
ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] __read_mostly = {
[IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
[IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
- .len = IPSET_MAXNAMELEN -1 },
+ .len = IPSET_MAXNAMELEN - 1 },
[IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
.len = IPSET_MAXNAMELEN - 1},
[IPSET_ATTR_REVISION] = { .type = NLA_U8 },
{
ip_set_id_t i, index = IPSET_INVALID_ID;
struct ip_set *set;
-
+
for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
set = ip_set_list[i];
if (set != NULL && STREQ(set->name, name))
u32 flags = flag_exist(nlh);
int ret = 0, len;
- if (unlikely(protocol_failed(attr)
+ if (unlikely(protocol_failed(attr)
|| attr[IPSET_ATTR_SETNAME] == NULL
|| attr[IPSET_ATTR_TYPENAME] == NULL
|| attr[IPSET_ATTR_REVISION] == NULL
|| attr[IPSET_ATTR_FAMILY] == NULL
|| (attr[IPSET_ATTR_DATA] != NULL
- && !flag_nested(attr[IPSET_ATTR_DATA]))))
+ && !flag_nested(attr[IPSET_ATTR_DATA]))))
return -IPSET_ERR_PROTOCOL;
name = nla_data(attr[IPSET_ATTR_SETNAME]);
&& set->type->family == clash->type->family
&& set->type->revision == clash->type->revision
&& set->variant->same_set(set, clash))
- ret = 0;
+ ret = 0;
goto cleanup;
}
ip_set_list[index] = set;
return ret;
-
+
cleanup:
set->variant->destroy(set);
put_out:
NFNL_CB_CONST struct nlattr * NFNL_CB_CONST attr[])
{
ip_set_id_t i;
-
+
if (unlikely(protocol_failed(attr)))
return -IPSET_ERR_PROTOCOL;
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL
&& (atomic_read(&ip_set_list[i]->ref)))
- return -IPSET_ERR_BUSY;
+ return -IPSET_ERR_BUSY;
}
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL)
ip_set_id_t from_id, to_id;
char from_name[IPSET_MAXNAMELEN];
u32 from_ref;
-
+
if (unlikely(protocol_failed(attr)
|| attr[IPSET_ATTR_SETNAME] == NULL
|| attr[IPSET_ATTR_SETNAME2] == NULL))
from = ip_set_list[from_id];
to = ip_set_list[to_id];
-
+
/* Features must not change.
* Not an artifical restriction anymore, as we must prevent
* possible loops created by swapping in setlist type of sets. */
&& from->type->family == to->type->family))
return -IPSET_ERR_TYPE_MISMATCH;
- /* No magic here: ref munging protected by the nfnl_lock */
+ /* No magic here: ref munging protected by the nfnl_lock */
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
from_ref = atomic_read(&from->ref);
atomic_set(&from->ref, atomic_read(&to->ref));
strncpy(to->name, from_name, IPSET_MAXNAMELEN);
atomic_set(&to->ref, from_ref);
-
+
ip_set_list[from_id] = to;
ip_set_list[to_id] = from;
struct nlattr *attr;
int rem;
- pr_debug("dump nlmsg");
+ pr_debug("dump nlmsg");
nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
pr_debug("type: %u, len %u", nla_type(attr), attr->nla_len);
}
struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
struct nlattr *attr = (void *)nlh + min_len;
ip_set_id_t index;
-
+
/* Second pass, so parser can't fail */
nla_parse(cda, IPSET_ATTR_CMD_MAX,
attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
/* cb->args[0] : dump single set/all sets
- * [1] : set index
+ * [1] : set index
* [..]: type specific
*/
index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
if (index == IPSET_INVALID_ID)
return -EEXIST;
-
+
cb->args[0] = DUMP_ONE;
cb->args[1] = index;
return 0;
*/
if (cb->args[0] != DUMP_ONE
&& !((cb->args[0] == DUMP_ALL)
- ^ (set->type->features & IPSET_DUMP_LAST)))
- continue;
+ ^ (set->type->features & IPSET_DUMP_LAST)))
+ continue;
pr_debug("List set: %s", set->name);
if (!cb->args[2]) {
/* Start listing: make sure set won't be destroyed */
pr_debug("nlmsg_len: %u", nlh->nlmsg_len);
dump_attrs(nlh);
}
-
+
return ret < 0 ? ret : skb->len;
}
int ret, len = nla_len(nla), retried = 0;
u32 lineno = 0;
bool eexist = flags & IPSET_FLAG_EXIST;
-
+
do {
write_lock_bh(&set->lock);
ret = set->variant->uadt(set, head, len, adt,
} while (ret == -EAGAIN
&& set->variant->resize
&& (ret = set->variant->resize(set, GFP_ATOMIC,
- retried++)) == 0);
+ retried++)) == 0);
if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
return 0;
if (lineno && attr[IPSET_ATTR_LINENO]) {
/* Error in restore/batch mode: send back lineno */
u32 *errline = nla_data(attr[IPSET_ATTR_LINENO]);
-
+
*errline = lineno;
}
-
+
return ret;
}
if (unlikely(protocol_failed(attr)
|| attr[IPSET_ATTR_SETNAME] == NULL
- || !((attr[IPSET_ATTR_DATA] != NULL) ^
- (attr[IPSET_ATTR_ADT] != NULL))
+ || !((attr[IPSET_ATTR_DATA] != NULL)
+ ^ (attr[IPSET_ATTR_ADT] != NULL))
|| (attr[IPSET_ATTR_DATA] != NULL
- && !flag_nested(attr[IPSET_ATTR_DATA]))
+ && !flag_nested(attr[IPSET_ATTR_DATA]))
|| (attr[IPSET_ATTR_ADT] != NULL
- && (!flag_nested(attr[IPSET_ATTR_ADT])
- || attr[IPSET_ATTR_LINENO] == NULL))))
+ && (!flag_nested(attr[IPSET_ATTR_ADT])
+ || attr[IPSET_ATTR_LINENO] == NULL))))
return -IPSET_ERR_PROTOCOL;
set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
set, attr[IPSET_ATTR_DATA], IPSET_ADD, flags);
} else {
int nla_rem;
-
+
nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
if (nla_type(nla) != IPSET_ATTR_DATA
|| !flag_nested(nla))
if (unlikely(protocol_failed(attr)
|| attr[IPSET_ATTR_SETNAME] == NULL
- || !((attr[IPSET_ATTR_DATA] != NULL) ^
- (attr[IPSET_ATTR_ADT] != NULL))
+ || !((attr[IPSET_ATTR_DATA] != NULL)
+ ^ (attr[IPSET_ATTR_ADT] != NULL))
|| (attr[IPSET_ATTR_DATA] != NULL
- && !flag_nested(attr[IPSET_ATTR_DATA]))
+ && !flag_nested(attr[IPSET_ATTR_DATA]))
|| (attr[IPSET_ATTR_ADT] != NULL
- && (!flag_nested(attr[IPSET_ATTR_ADT])
- || attr[IPSET_ATTR_LINENO] == NULL))))
+ && (!flag_nested(attr[IPSET_ATTR_ADT])
+ || attr[IPSET_ATTR_LINENO] == NULL))))
return -IPSET_ERR_PROTOCOL;
-
+
set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -EEXIST;
-
+
if (attr[IPSET_ATTR_DATA]) {
ret = call_ad(ctnl, skb, attr,
set, attr[IPSET_ATTR_DATA], IPSET_DEL, flags);
} else {
int nla_rem;
-
+
nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
if (nla_type(nla) != IPSET_ATTR_DATA
|| !flag_nested(nla))
|| attr[IPSET_ATTR_DATA] == NULL
|| !flag_nested(attr[IPSET_ATTR_DATA])))
return -IPSET_ERR_PROTOCOL;
-
+
set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -EEXIST;
-
+
read_lock_bh(&set->lock);
ret = set->variant->uadt(set,
nla_data(attr[IPSET_ATTR_DATA]),
/* Userspace can't trigger element to be re-added */
if (ret == -EAGAIN)
ret = 1;
-
+
return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
}
if (unlikely(protocol_failed(attr)
|| attr[IPSET_ATTR_SETNAME] == NULL))
return -IPSET_ERR_PROTOCOL;
-
+
index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
if (index == IPSET_INVALID_ID)
return -EEXIST;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL)
return -ENOMEM;
-
+
nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
IPSET_CMD_HEADER);
if (!nlh2)
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
if (ret < 0)
return -EFAULT;
-
+
return 0;
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
- kfree_skb(skb2);
+ kfree_skb(skb2);
return -EFAULT;
}
|| attr[IPSET_ATTR_TYPENAME] == NULL
|| attr[IPSET_ATTR_FAMILY] == NULL))
return -IPSET_ERR_PROTOCOL;
-
+
family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
if (!find_set_type_minmax(typename, family, &min, &max)) {
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL)
return -ENOMEM;
-
+
nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
IPSET_CMD_TYPE);
if (!nlh2)
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
if (ret < 0)
return -EFAULT;
-
+
return 0;
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
- kfree_skb(skb2);
+ kfree_skb(skb2);
return -EFAULT;
}
static int
ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
- NFNL_CB_CONST struct nlmsghdr *nlh,
- NFNL_CB_CONST struct nlattr * NFNL_CB_CONST attr[])
+ NFNL_CB_CONST struct nlmsghdr *nlh,
+ NFNL_CB_CONST struct nlattr * NFNL_CB_CONST attr[])
{
struct sk_buff *skb2;
struct nlmsghdr *nlh2;
if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
return -IPSET_ERR_PROTOCOL;
-
+
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL)
return -ENOMEM;
-
+
nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
IPSET_CMD_PROTOCOL);
if (!nlh2)
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
if (ret < 0)
return -EFAULT;
-
+
return 0;
nla_put_failure:
nlmsg_cancel(skb2, nlh2);
nlmsg_failure:
- kfree_skb(skb2);
+ kfree_skb(skb2);
return -EFAULT;
}
return -EPERM;
if (optval != SO_IP_SET)
return -EBADF;
- if (*len < sizeof(unsigned)) {
+ if (*len < sizeof(unsigned))
return -EINVAL;
- }
+
data = vmalloc(*len);
if (!data)
return -ENOMEM;
goto done;
} /* end of switch(op) */
- copy:
+copy:
ret = copy_to_user(user, data, copylen);
-
- done:
+
+done:
vfree(data);
if (ret > 0)
ret = 0;
}
static struct nf_sockopt_ops so_set = {
- .pf = PF_INET,
- .get_optmin = SO_IP_SET,
+ .pf = PF_INET,
+ .get_optmin = SO_IP_SET,
.get_optmax = SO_IP_SET + 1,
.get = &ip_set_sockfn_get,
.owner = THIS_MODULE,
return ret;
}
- pr_notice("ip_set: protocol %u", IPSET_PROTOCOL);
+ pr_notice("ip_set: protocol %u", IPSET_PROTOCOL);
return 0;
}
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
{
struct bitmap_ip *map = set->data;
u32 ip;
-
+
ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
+
if (cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip &= HOSTMASK(cidr);
bitmap_ip_destroy(struct ip_set *set)
{
struct bitmap_ip *map = set->data;
-
+
ip_set_free(map->members);
kfree(map);
-
+
set->data = NULL;
}
bitmap_ip_flush(struct ip_set *set)
{
struct bitmap_ip *map = set->data;
-
+
memset(map->members, 0, map->memsize);
}
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
return -EFAULT;
for (; cb->args[2] < map->elements; cb->args[2]++) {
id = cb->args[2];
- if (!bitmap_ip_test(map, id))
+ if (!bitmap_ip_test(map, id))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
{
struct bitmap_ip *x = a->data;
struct bitmap_ip *y = b->data;
-
+
return x->first_ip == y->first_ip
&& x->last_ip == y->last_ip
&& x->netmask == y->netmask;
if (bitmap_ip_timeout_test(map, id))
ret = 0;
-
+
map->members[id] = IPSET_ELEM_UNSET;
return ret;
}
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
-
+
if (adt == IPSET_TEST)
return bitmap_ip_timeout_test(map,
ip_to_id((const struct bitmap_ip *)map, ip));
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
+
if (cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip &= HOSTMASK(cidr);
if (ip_to > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
-
+
if (tb[IPSET_ATTR_TIMEOUT])
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
bitmap_ip_timeout_destroy(struct ip_set *set)
{
struct bitmap_ip_timeout *map = set->data;
-
+
del_timer_sync(&map->gc);
ip_set_free(map->members);
kfree(map);
-
+
set->data = NULL;
}
bitmap_ip_timeout_flush(struct ip_set *set)
{
struct bitmap_ip_timeout *map = set->data;
-
+
memset(map->members, IPSET_ELEM_UNSET, map->memsize);
}
{
const struct bitmap_ip_timeout *map = set->data;
struct nlattr *nested;
-
+
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
struct nlattr *adt, *nested;
u32 id, first = cb->args[2];
unsigned long *table = map->members;
-
+
adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt)
return -EFAULT;
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
{
struct bitmap_ip_timeout *x = a->data;
struct bitmap_ip_timeout *y = b->data;
-
+
return x->first_ip == y->first_ip
&& x->last_ip == y->last_ip
&& x->netmask == y->netmask
read_lock_bh(&set->lock);
for (id = 0; id < map->elements; id++)
if (ip_set_timeout_expired(table[id]))
- table[id] = IPSET_ELEM_UNSET;
+ table[id] = IPSET_ELEM_UNSET;
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
set->data = map;
set->family = AF_INET;
-
+
return true;
}
if (nla_parse(tb, IPSET_ATTR_CREATE_MAX, head, len,
bitmap_ip_create_policy))
return -IPSET_ERR_PROTOCOL;
-
+
ret = ip_set_get_ipaddr4(tb, IPSET_ATTR_IP, &first_ip);
if (ret)
return ret;
last_ip = htonl(last_ip);
if (first_ip > last_ip) {
u32 tmp = first_ip;
-
+
first_ip = last_ip;
last_ip = tmp;
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
+
if (cidr >= 32)
return -IPSET_ERR_INVALID_CIDR;
last_ip = first_ip | ~HOSTMASK(cidr);
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
-
+
if (netmask > 32)
return -IPSET_ERR_INVALID_NETMASK;
first_ip &= HOSTMASK(netmask);
last_ip |= ~HOSTMASK(netmask);
}
-
+
if (netmask == 32) {
hosts = 1;
elements = last_ip - first_ip + 1;
hosts = 2 << (32 - netmask - 1);
elements = 2 << (netmask - mask_bits - 1);
}
- if (elements > IPSET_BITMAP_MAX_RANGE + 1) {
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;
- }
+
pr_debug("hosts %u, elements %u", hosts, elements);
if (tb[IPSET_ATTR_TIMEOUT]) {
struct bitmap_ip_timeout *map;
-
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
-
+
map->memsize = elements * sizeof(unsigned long);
-
+
if (!init_map_ip(set, (struct bitmap_ip *)map,
first_ip, last_ip,
elements, hosts, netmask)) {
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = &bitmap_ip_timeout;
-
+
bitmap_ip_gc_init(set);
} else {
struct bitmap_ip *map;
-
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
-
+
map->memsize = bitmap_bytes(0, elements - 1);
if (!init_map_ip(set, map,
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
bitmap_ipmac_exist(const struct ipmac_telem *elem)
{
return elem->match == MAC_UNSET
- || (elem->match == MAC_FILLED
- && !ip_set_timeout_expired(elem->timeout));
+ || (elem->match == MAC_FILLED
+ && !ip_set_timeout_expired(elem->timeout));
}
/* Base variant */
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
elem = bitmap_ipmac_elem(map, id);
- if (!bitmap_ipmac_exist(elem))
+ if (!bitmap_ipmac_exist(elem))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
/* Backward compatibility: we don't check the second flag */
if (skb_mac_header(skb) < skb->head
|| (skb_mac_header(skb) + ETH_HLEN) > skb->data)
- return -EINVAL;
+ return -EINVAL;
data.id -= map->first_ip;
data.ether = eth_hdr(skb)->h_source;
if (with_timeout(map->timeout))
del_timer_sync(&map->gc);
-
+
ip_set_free(map->members);
kfree(map);
-
+
set->data = NULL;
}
bitmap_ipmac_flush(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
-
+
memset(map->members, 0,
(map->last_ip - map->first_ip + 1) * map->dsize);
}
htonl(atomic_read(&set->ref) - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map)
- + (map->last_ip - map->first_ip + 1) * map->dsize));
+ + (map->last_ip - map->first_ip + 1) * map->dsize));
if (with_timeout(map->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
{
struct bitmap_ipmac *x = a->data;
struct bitmap_ipmac *y = b->data;
-
+
return x->first_ip == y->first_ip
&& x->last_ip == y->last_ip
&& x->timeout == y->timeout;
struct bitmap_ipmac *map = set->data;
struct ipmac_telem *elem;
u32 id, last = map->last_ip - map->first_ip;
-
+
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
elem = bitmap_ipmac_elem(map, id);
if (elem->match == MAC_FILLED
&& ip_set_timeout_expired(elem->timeout))
- elem->match = MAC_EMPTY;
+ elem->match = MAC_EMPTY;
}
read_unlock_bh(&set->lock);
set->data = map;
set->family = AF_INET;
-
+
return true;
}
if (nla_parse(tb, IPSET_ATTR_CREATE_MAX, head, len,
bitmap_ipmac_create_policy))
return -IPSET_ERR_PROTOCOL;
-
+
ret = ip_set_get_ipaddr4(tb, IPSET_ATTR_IP, &first_ip);
if (ret)
return ret;
last_ip = ntohl(last_ip);
if (first_ip > last_ip) {
u32 tmp = first_ip;
-
+
first_ip = last_ip;
last_ip = tmp;
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
+
if (cidr >= 32)
return -IPSET_ERR_INVALID_CIDR;
last_ip = first_ip | ~HOSTMASK(cidr);
if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct ipmac_telem);
-
+
if (!init_map_ipmac(set, map, first_ip, last_ip)) {
kfree(map);
return -ENOMEM;
}
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
- set->variant = &bitmap_tipmac;
+
+ set->variant = &bitmap_tipmac;
bitmap_ipmac_gc_init(set);
- } else {
+ } else {
map->dsize = sizeof(struct ipmac_elem);
if (!init_map_ipmac(set, map, first_ip, last_ip)) {
return -ENOMEM;
}
set->variant = &bitmap_ipmac;
-
+
}
return 0;
}
#include <linux/udp.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
if (!get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &port))
return -EINVAL;
-
+
port = ntohs(port);
if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;
-
+
port -= map->first_port;
switch (adt) {
port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
else
return -IPSET_ERR_PROTOCOL;
-
+
if (port < map->first_port || port > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;
bitmap_port_destroy(struct ip_set *set)
{
struct bitmap_port *map = set->data;
-
+
ip_set_free(map->members);
kfree(map);
-
+
set->data = NULL;
}
bitmap_port_flush(struct ip_set *set)
{
struct bitmap_port *map = set->data;
-
+
memset(map->members, 0, map->memsize);
}
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
return -EFAULT;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
- if (!test_bit(id, map->members))
+ if (!test_bit(id, map->members))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
{
struct bitmap_port *x = a->data;
struct bitmap_port *y = b->data;
-
+
return x->first_port == y->first_port
&& x->last_port == y->last_port;
}
if (bitmap_port_timeout_test(map, id))
ret = 0;
-
+
map->members[id] = IPSET_ELEM_UNSET;
return ret;
}
}
} else
port_to = port;
-
+
if (port_to > map->last_port)
return -IPSET_ERR_BITMAP_RANGE;
if (tb[IPSET_ATTR_TIMEOUT])
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
+
for (; port <= port_to; port++) {
id = port - map->first_port;
ret = adt == IPSET_ADD
? bitmap_port_timeout_add(map, id, timeout)
: bitmap_port_timeout_del(map, id);
-
+
if (ret && !ip_set_eexist(ret, flags))
return ret;
else
del_timer_sync(&map->gc);
ip_set_free(map->members);
kfree(map);
-
+
set->data = NULL;
}
bitmap_port_timeout_flush(struct ip_set *set)
{
struct bitmap_port_timeout *map = set->data;
-
+
memset(map->members, 0, map->memsize);
}
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
u16 id, first = cb->args[2];
u16 last = map->last_port - map->first_port;
unsigned long *table = map->members;
-
+
adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!adt)
return -EFAULT;
/* Set listing finished */
cb->args[2] = 0;
-
+
return 0;
nla_put_failure:
{
struct bitmap_port_timeout *x = a->data;
struct bitmap_port_timeout *y = b->data;
-
+
return x->first_port == y->first_port
&& x->last_port == y->last_port
&& x->timeout == y->timeout;
unsigned long *table = map->members;
u32 id; /* wraparound */
u16 last = map->last_port - map->first_port;
-
+
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id <= last; id++)
if (ip_set_timeout_expired(table[id]))
- table[id] = IPSET_ELEM_UNSET;
+ table[id] = IPSET_ELEM_UNSET;
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
set->data = map;
set->family = AF_UNSPEC;
-
+
return true;
}
if (nla_parse(tb, IPSET_ATTR_CREATE_MAX, head, len,
bitmap_port_create_policy))
return -IPSET_ERR_PROTOCOL;
-
+
if (tb[IPSET_ATTR_PORT])
first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
else
last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (first_port > last_port) {
u16 tmp = first_port;
-
+
first_port = last_port;
last_port = tmp;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
struct bitmap_port_timeout *map;
-
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
-
+
map->memsize = (last_port - first_port + 1)
* sizeof(unsigned long);
-
+
if (!init_map_port(set, (struct bitmap_port *) map,
first_port, last_port)) {
kfree(map);
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = &bitmap_port_timeout;
-
+
bitmap_port_gc_init(set);
} else {
struct bitmap_port *map;
-
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <net/ip.h>
struct chash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
u32 ip;
-
+
ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
ip &= NETMASK(h->netmask);
if (ip == 0)
swap(ip, ip_to);
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
+
if (cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip &= HOSTMASK(cidr);
hash_ip6_data_swap(struct hash_ip6_elem *dst, struct hash_ip6_elem *src)
{
struct in6_addr tmp;
-
+
ipv6_addr_copy(&tmp, &dst->ip.in6);
ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
ipv6_addr_copy(&src->ip.in6, &tmp);
static inline bool
hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
{
- const struct hash_ip6_telem *e =
+ const struct hash_ip6_telem *e =
(const struct hash_ip6_telem *)data;
-
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
-
+
if ((set->family == AF_INET && netmask > 32)
|| (set->family == AF_INET6 && netmask > 128)
|| netmask == 0)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
+
set->variant = set->family == AF_INET
? &hash_ip4_tvariant : &hash_ip6_tvariant;
set->variant = set->family == AF_INET
? &hash_ip4_variant : &hash_ip6_variant;
}
-
+
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
-
+
return 0;
}
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <net/ip.h>
if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
-
+
if (data.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
{
struct chash *x = a->data;
struct chash *y = b->data;
-
+
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
hash_ipport6_data_tlist(struct sk_buff *skb,
const struct hash_ipport6_elem *data)
{
- const struct hash_ipport6_telem *e =
+ const struct hash_ipport6_telem *e =
(const struct hash_ipport6_telem *)data;
-
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
}
ret = adtfn(set, &data, GFP_ATOMIC, timeout);
-
+
return ip_set_eexist(ret, flags) ? 0 : ret;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
+
set->variant = set->family == AF_INET
? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
set->variant = set->family == AF_INET
? &hash_ipport4_variant : &hash_ipport6_variant;
}
-
+
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
-
+
return 0;
}
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <net/ip.h>
struct chash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
-
+
if (!get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
-
+
if (data.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
{
struct chash *x = a->data;
struct chash *y = b->data;
-
+
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
struct hash_ipportip6_elem *src)
{
struct hash_ipportip6_elem tmp;
-
+
memcpy(&tmp, dst, sizeof(tmp));
memcpy(dst, src, sizeof(tmp));
memcpy(src, &tmp, sizeof(tmp));
hash_ipportip6_data_tlist(struct sk_buff *skb,
const struct hash_ipportip6_elem *data)
{
- const struct hash_ipportip6_telem *e =
+ const struct hash_ipportip6_telem *e =
(const struct hash_ipportip6_telem *)data;
-
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
+
set->variant = set->family == AF_INET
? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
set->variant = set->family == AF_INET
? &hash_ipportip4_variant : &hash_ipportip6_variant;
}
-
+
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
-
+
return 0;
}
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <net/ip.h>
struct chash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
+	struct hash_net4_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
-
+
if (data.cidr == 0)
return -EINVAL;
if (adt == IPSET_TEST)
{
struct chash *x = a->data;
struct chash *y = b->data;
-
+
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
static inline bool
hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
{
- const struct hash_net6_telem *e =
+ const struct hash_net6_telem *e =
(const struct hash_net6_telem *)data;
-
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
}
ret = adtfn(set, &data, GFP_ATOMIC, timeout);
-
+
return ip_set_eexist(ret, flags) ? 0 : ret;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
+
set->variant = set->family == AF_INET
? &hash_net4_tvariant : &hash_net6_tvariant;
set->variant = set->family == AF_INET
? &hash_net4_variant : &hash_net6_variant;
}
-
+
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
-
+
return 0;
}
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <net/ip.h>
static inline void
hash_netport4_data_copy(struct hash_netport4_elem *dst,
- const struct hash_netport4_elem *src)
+ const struct hash_netport4_elem *src)
{
dst->ip = src->ip;
dst->port = src->port;
static inline void
hash_netport4_data_swap(struct hash_netport4_elem *dst,
- struct hash_netport4_elem *src)
+ struct hash_netport4_elem *src)
{
swap(dst->ip, src->ip);
swap(dst->port, src->port);
static inline bool
hash_netport4_data_list(struct sk_buff *skb,
- const struct hash_netport4_elem *data)
+ const struct hash_netport4_elem *data)
{
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
{
struct chash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_netport4_elem data =
-		{ .cidr = h->nets[0].cidr || HOST_MASK };
+	struct hash_netport4_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
if (data.cidr == 0)
return -EINVAL;
ret = ip_set_get_ipaddr4(tb, IPSET_ATTR_IP, &data.ip);
if (ret)
return ret;
-
+
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
if (tb[IPSET_ATTR_PROTO]) {
data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
-
+
if (data.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
{
struct chash *x = a->data;
struct chash *y = b->data;
-
+
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
&& x->timeout == y->timeout
static inline void
hash_netport6_data_copy(struct hash_netport6_elem *dst,
- const struct hash_netport6_elem *src)
+ const struct hash_netport6_elem *src)
{
memcpy(dst, src, sizeof(*dst));
}
static inline void
hash_netport6_data_swap(struct hash_netport6_elem *dst,
- struct hash_netport6_elem *src)
+ struct hash_netport6_elem *src)
{
struct hash_netport6_elem tmp;
static inline bool
hash_netport6_data_list(struct sk_buff *skb,
- const struct hash_netport6_elem *data)
+ const struct hash_netport6_elem *data)
{
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
hash_netport6_data_tlist(struct sk_buff *skb,
const struct hash_netport6_elem *data)
{
- const struct hash_netport6_telem *e =
+ const struct hash_netport6_telem *e =
(const struct hash_netport6_telem *)data;
-
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
{
struct chash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
-	struct hash_netport6_elem data =
-		{ .cidr = h->nets[0].cidr || HOST_MASK };
+	struct hash_netport6_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
if (data.cidr == 0)
return -EINVAL;
ret = ip_set_get_ipaddr6(tb, IPSET_ATTR_IP, &data.ip);
if (ret)
return ret;
-
+
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!data.cidr)
}
ret = adtfn(set, &data, GFP_ATOMIC, timeout);
-
+
return ip_set_eexist(ret, flags) ? 0 : ret;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-
+
set->variant = set->family == AF_INET
? &hash_netport4_tvariant : &hash_netport6_tvariant;
set->variant = set->family == AF_INET
? &hash_netport4_variant : &hash_netport6_variant;
}
-
+
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)",
set->name, jhash_size(h->table->htable_bits),
h->table->htable_bits, h->maxelem, set->data, h->table);
-
+
return 0;
}
{
const struct set_telem *elem =
(const struct set_telem *) list_set_elem(map, id);
-
+
return ip_set_timeout_test(elem->timeout);
}
break;
}
}
-
+
static inline void
list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
unsigned long timeout)
swap(e->timeout, timeout);
}
}
-
+
static int
list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
unsigned long timeout)
list_elem_tadd(map, i, id, timeout);
else
list_elem_add(map, i, id);
-
+
return 0;
}
}
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
-
+
switch (adt) {
case IPSET_TEST:
for (i = 0; i < map->size && !ret; i++) {
elem = list_set_elem(map, i);
if (elem->id == id
&& !(with_timeout && list_set_expired(map, i)))
- ret = -IPSET_ERR_EXIST;
+ ret = -IPSET_ERR_EXIST;
}
if (ret == -IPSET_ERR_EXIST)
break;
} else if (with_timeout && list_set_expired(map, i))
continue;
else if (elem->id == id
- && (before == 0
- || (before > 0
- && next_id_eq(map, i, refid))))
+ && (before == 0
+ || (before > 0
+ && next_id_eq(map, i, refid))))
ret = list_set_del(map, id, i);
- else if (before < 0 && elem->id == refid
- && next_id_eq(map, i, id))
- ret = list_set_del(map, id, i + 1);
+ else if (before < 0
+ && elem->id == refid
+ && next_id_eq(map, i, id))
+ ret = list_set_del(map, id, i + 1);
}
break;
default:
del_timer_sync(&map->gc);
list_set_flush(set);
kfree(map);
-
+
set->data = NULL;
}
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->size * map->dsize));
ipset_nest_end(skb, nested);
-
+
return 0;
nla_put_failure:
return -EFAULT;
e = list_set_elem(map, i);
if (e->id == IPSET_INVALID_ID)
goto finish;
- if (with_timeout(map->timeout) && list_set_expired(map, i))
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
{
struct list_set *x = a->data;
struct list_set *y = b->data;
-
+
return x->size == y->size
&& x->timeout == y->timeout;
}
e = (struct set_telem *) list_set_elem(map, i);
if (e->id != IPSET_INVALID_ID
&& list_set_expired(map, i))
- list_set_del(map, e->id, i);
+ list_set_del(map, e->id, i);
}
read_unlock_bh(&set->lock);
struct list_set *map;
struct set_elem *e;
u32 i;
-
+
map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
if (!map)
return false;
e = list_set_elem(map, i);
e->id = IPSET_INVALID_ID;
}
-
+
return true;
}
if (nla_parse(tb, IPSET_ATTR_CREATE_MAX, head, len,
list_set_create_policy))
return -IPSET_ERR_PROTOCOL;
-
+
if (tb[IPSET_ATTR_SIZE])
size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
if (size < IP_SET_LIST_MIN_SIZE)
if (!init_list_set(set, size, sizeof(struct set_telem),
ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
return -ENOMEM;
-
+
list_set_gc_init(set);
- } else {
+ } else {
if (!init_list_set(set, size, sizeof(struct set_elem),
IPSET_NO_TIMEOUT))
return -ENOMEM;
static inline int
match_set(ip_set_id_t index, const struct sk_buff *skb,
u8 pf, u8 dim, u8 flags, int inv)
-{
+{
if (ip_set_test(index, skb, pf, dim, flags))
inv = !inv;
return inv;
* 2.6.24: [NETLINK]: Introduce nested and byteorder flag to netlink attribute
* 2.6.31: netfilter: passive OS fingerprint xtables match
*/
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
#error "Linux kernel version too old: must be >= 2.6.31"
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
#define CHECK_OK 1
#define CHECK_FAIL 0
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
#define CHECK_OK 0
-#define CHECK_FAIL -EINVAL
+#define CHECK_FAIL (-EINVAL)
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static bool
set_match_v0(const struct sk_buff *skb, const struct xt_match_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
#endif
{
const struct xt_set_info_match_v0 *info = par->matchinfo;
-
+
return match_set(info->match_set.index, skb, par->family,
info->match_set.u.compat.dim,
info->match_set.u.compat.flags,
}
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static bool
set_match_v0_checkentry(const struct xt_mtchk_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
ip_set_id_t index;
index = ip_set_nfnl_get_byindex(info->match_set.index);
-
+
if (index == IPSET_INVALID_ID) {
-		pr_warning("Cannot find set indentified by id %u to match",
+		pr_warning("Cannot find set identified by id %u to match",
info->match_set.index);
ip_set_nfnl_put(info->match_set.index);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static unsigned int
set_target_v0(struct sk_buff *skb, const struct xt_target_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
#endif
{
const struct xt_set_info_target_v0 *info = par->targinfo;
-
+
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par->family,
info->add_set.u.compat.dim,
return XT_CONTINUE;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static bool
set_target_v0_checkentry(const struct xt_tgchk_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
/* Revision 1: current interface to netfilter/iptables */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static bool
set_match(const struct sk_buff *skb, const struct xt_match_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
#endif
{
const struct xt_set_info_match *info = par->matchinfo;
-
+
return match_set(info->match_set.index, skb, par->family,
info->match_set.dim,
info->match_set.flags,
info->match_set.flags & IPSET_INV_MATCH);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static bool
set_match_checkentry(const struct xt_mtchk_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
ip_set_id_t index;
index = ip_set_nfnl_get_byindex(info->match_set.index);
-
+
if (index == IPSET_INVALID_ID) {
-		pr_warning("Cannot find set indentified by id %u to match",
+		pr_warning("Cannot find set identified by id %u to match",
info->match_set.index);
ip_set_nfnl_put(info->match_set.index);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static unsigned int
set_target(struct sk_buff *skb, const struct xt_target_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
#endif
{
const struct xt_set_info_target *info = par->targinfo;
-
+
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index,
skb, par->family,
return XT_CONTINUE;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static bool
set_target_checkentry(const struct xt_tgchk_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) */
static struct xt_target set_targets[] __read_mostly = {
{
- .name = "SET",
+ .name = "SET",
.revision = 0,
.family = NFPROTO_IPV4,
- .target = set_target_v0,
+ .target = set_target_v0,
.targetsize = sizeof(struct xt_set_info_target_v0),
- .checkentry = set_target_v0_checkentry,
- .destroy = set_target_v0_destroy,
- .me = THIS_MODULE
+ .checkentry = set_target_v0_checkentry,
+ .destroy = set_target_v0_destroy,
+ .me = THIS_MODULE
},
{
- .name = "SET",
+ .name = "SET",
.revision = 1,
.family = NFPROTO_IPV4,
- .target = set_target,
+ .target = set_target,
.targetsize = sizeof(struct xt_set_info_target),
- .checkentry = set_target_checkentry,
- .destroy = set_target_destroy,
- .me = THIS_MODULE
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
},
{
- .name = "SET",
+ .name = "SET",
.revision = 1,
.family = NFPROTO_IPV6,
- .target = set_target,
+ .target = set_target,
.targetsize = sizeof(struct xt_set_info_target),
- .checkentry = set_target_checkentry,
- .destroy = set_target_destroy,
- .me = THIS_MODULE
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
},
};
static int __init xt_set_init(void)
{
int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
-
+
if (!ret) {
ret = xt_register_targets(set_targets,
ARRAY_SIZE(set_targets));