		       u8 family, u8 dim, u8 flags);

/* Utility functions */
-extern void * ip_set_alloc(size_t size, gfp_t gfp_mask);
+extern void * ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
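The gfp_mask argument is dropped because, as the hunks below show, every call site passed GFP_KERNEL anyway. As a hedged, self-contained sketch of the resulting calling convention (the struct and function names here are hypothetical, not part of the patch): since ip_set_alloc() may hand back either kmalloc- or vmalloc-backed memory, the result must always be released with ip_set_free(), never plain kfree().

/* Hypothetical caller of the simplified API; "example_map" is
 * illustrative only. */
struct example_map {
	void *members;	/* backing store for the set members */
	size_t memsize;	/* size of the backing store in bytes */
};

static bool
example_map_init(struct example_map *map)
{
	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
	return true;
}

static void
example_map_destroy(struct example_map *map)
{
	/* Must pair with ip_set_alloc(): the memory may be vmalloc-backed. */
	ip_set_free(map->members);
}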
		/* In case we have plenty of memory :-) */
		return -IPSET_ERR_HASH_FULL;
	t = ip_set_alloc(sizeof(*t)
-			 + jhash_size(htable_bits) * sizeof(struct hbucket),
-			 GFP_KERNEL);
+			 + jhash_size(htable_bits) * sizeof(struct hbucket));
	if (!t)
		return -ENOMEM;
	t->htable_bits = htable_bits;
		/* In case we have plenty of memory :-) */
		return -IPSET_ERR_HASH_FULL;
	t = ip_set_alloc(sizeof(*t)
-			 + jhash_size(htable_bits) * sizeof(struct hbucket),
-			 GFP_KERNEL);
+			 + jhash_size(htable_bits) * sizeof(struct hbucket));
	if (!t)
		return -ENOMEM;
	t->htable_bits = htable_bits;
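For scale: jhash_size(n) expands to (u32)1 << n (linux/jhash.h), so each resize retry with an incremented htable_bits doubles the bucket array, and the whole table is allocated as one contiguous block. That growth is why the vmalloc fallback inside ip_set_alloc() matters. A hedged arithmetic sketch (the helper name is hypothetical):

/* Not part of the patch: at htable_bits = 20 this single block already
 * holds 1M hbuckets, far beyond what a kmalloc of physically contiguous
 * pages can be relied on to serve under memory fragmentation.
 */
static size_t
example_table_size(u8 htable_bits)
{
	return sizeof(struct htable)
	       + jhash_size(htable_bits) * sizeof(struct hbucket);
}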
	 u32 first_ip, u32 last_ip,
	 u32 elements, u32 hosts, u8 netmask)
{
-	map->members = ip_set_alloc(map->memsize, GFP_KERNEL);
+	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
	map->first_ip = first_ip;
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
	       u32 first_ip, u32 last_ip)
{
-	map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize,
-				    GFP_KERNEL);
+	map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
	if (!map->members)
		return false;
	map->first_ip = first_ip;
init_map_port(struct ip_set *set, struct bitmap_port *map,
	      u16 first_port, u16 last_port)
{
-	map->members = ip_set_alloc(map->memsize, GFP_KERNEL);
+	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
	map->first_port = first_port;
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
+#include <linux/version.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
/* Utility functions */
void *
-ip_set_alloc(size_t size, gfp_t gfp_mask)
+ip_set_alloc(size_t size)
{
	void *members = NULL;

	if (size < KMALLOC_MAX_SIZE)
-		members = kzalloc(size, gfp_mask | __GFP_NOWARN);
+		members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (members) {
		pr_debug("%p: allocated with kmalloc\n", members);
		return members;
	}
-	members = __vmalloc(size, gfp_mask | __GFP_ZERO | __GFP_HIGHMEM,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+	members = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_HIGHMEM,
			    PAGE_KERNEL);
+#else
+	members = vzalloc(size);
+#endif
	if (!members)
		return NULL;
	pr_debug("%p: allocated with vmalloc\n", members);
	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket),
-			GFP_KERNEL);
+			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket),
-			GFP_KERNEL);
+			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket),
-			GFP_KERNEL);
+			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket),
-			GFP_KERNEL);
+			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket),
-			GFP_KERNEL);
+			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	hbits = htable_bits(hashsize);
	h->table = ip_set_alloc(
			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket),
-			GFP_KERNEL);
+			+ jhash_size(hbits) * sizeof(struct hbucket));
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
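Note that kfree(h) in the error paths above remains correct: the set structure itself comes from plain kmalloc, and only h->table comes from ip_set_alloc(). The release path for the table has to dispatch on how the memory was actually obtained. A sketch of that counterpart, assuming the usual is_vmalloc_addr() test, mirroring the ip_set_free() declared in the header hunk:

/* Sketch of the free-side counterpart to ip_set_alloc(): vfree() for
 * pointers in the vmalloc area, kfree() otherwise. */
void
ip_set_free(void *members)
{
	pr_debug("%p: free with %s\n", members,
		 is_vmalloc_addr(members) ? "vfree" : "kfree");
	if (is_vmalloc_addr(members))
		vfree(members);
	else
		kfree(members);
}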