An unoptimized version of the Jenkins one-at-a-time hash function is copied all over the code wherever a hashtable is used. Instead, the optimized version shared across the whole kernel should be used to reduce code duplication and keep bug fixes in a single place.
Only the DAT code must use the old implementation because it is used as a distributed hash function which has to be common for all nodes.
Signed-off-by: Sven Eckelmann sven@narfation.org --- Removed special choose_tt function and instead also use jhash
bridge_loop_avoidance.c | 20 ++++++-------------- hash.h | 22 ---------------------- main.h | 1 + originator.h | 15 ++------------- vis.c | 15 ++------------- 5 files changed, 11 insertions(+), 62 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index ec12c79..1400274 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -41,14 +41,10 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) { struct batadv_claim *claim = (struct batadv_claim *)data; - uint32_t hash = 0; + uint32_t hash;
- hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr)); - hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid)); - - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); + hash = jhash(&claim->addr, sizeof(claim->addr), 0); + hash = jhash(&claim->vid, sizeof(claim->vid), hash);
return hash % size; } @@ -58,14 +54,10 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data, uint32_t size) { struct batadv_claim *claim = (struct batadv_claim *)data; - uint32_t hash = 0; + uint32_t hash;
- hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr)); - hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid)); - - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); + hash = jhash(&claim->addr, sizeof(claim->addr), 0); + hash = jhash(&claim->vid, sizeof(claim->vid), hash);
return hash % size; } diff --git a/hash.h b/hash.h index e053339..977de9c 100644 --- a/hash.h +++ b/hash.h @@ -82,28 +82,6 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, }
/** - * batadv_hash_bytes - hash some bytes and add them to the previous hash - * @hash: previous hash value - * @data: data to be hashed - * @size: number of bytes to be hashed - * - * Returns the new hash value. - */ -static inline uint32_t batadv_hash_bytes(uint32_t hash, void *data, - uint32_t size) -{ - const unsigned char *key = data; - int i; - - for (i = 0; i < size; i++) { - hash += key[i]; - hash += (hash << 10); - hash ^= (hash >> 6); - } - return hash; -} - -/** * batadv_hash_add - adds data to the hashtable * @hash: storage hash table * @compare: callback to determine if 2 hash elements are identical diff --git a/main.h b/main.h index 2032de2..f58e373 100644 --- a/main.h +++ b/main.h @@ -147,6 +147,7 @@ enum batadv_uev_type { #include <linux/workqueue.h> /* workqueue */ #include <linux/percpu.h> #include <linux/slab.h> +#include <linux/jhash.h> #include <net/sock.h> /* struct sock */ #include <linux/jiffies.h> #include <linux/seq_file.h> diff --git a/originator.h b/originator.h index 9778e65..1311d39 100644 --- a/originator.h +++ b/originator.h @@ -46,20 +46,9 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, */ static inline uint32_t batadv_choose_orig(const void *data, uint32_t size) { - const unsigned char *key = data; - uint32_t hash = 0; - size_t i; - - for (i = 0; i < 6; i++) { - hash += key[i]; - hash += (hash << 10); - hash ^= (hash >> 6); - } - - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); + uint32_t hash;
+ hash = jhash(data, ETH_ALEN, 0); return hash % size; }
diff --git a/vis.c b/vis.c index 0f65a9d..2eb6878 100644 --- a/vis.c +++ b/vis.c @@ -72,21 +72,10 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size) { const struct batadv_vis_info *vis_info = data; const struct batadv_vis_packet *packet; - const unsigned char *key; - uint32_t hash = 0; - size_t i; + uint32_t hash;
packet = (struct batadv_vis_packet *)vis_info->skb_packet->data; - key = packet->vis_orig; - for (i = 0; i < ETH_ALEN; i++) { - hash += key[i]; - hash += (hash << 10); - hash ^= (hash >> 6); - } - - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); + hash = jhash(&packet->vis_orig, sizeof(packet->vis_orig), 0);
return hash % size; }
The common hashtable implementation in the kernel uses bits of the hash to compute the final size of the hashtable. The current batman-adv hash implementations should do the same to allow a migration to the common hashtable implementation.
Signed-off-by: Sven Eckelmann sven@narfation.org --- bridge_loop_avoidance.c | 6 ++++-- distributed-arp-table.c | 2 +- main.h | 8 ++++++++ originator.c | 2 +- translation-table.c | 8 ++++++-- vis.c | 2 +- 6 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 1400274..2fd5f09 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -1195,6 +1195,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv) struct batadv_hard_iface *primary_if; uint16_t crc; unsigned long entrytime; + uint32_t hash_claim_size = 1 << BATADV_BLA_CLAIM_HASH_BITS; + uint32_t hash_backbone_size = 1 << BATADV_BLA_BACKBONE_HASH_BITS;
spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
@@ -1221,8 +1223,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv) if (bat_priv->bla.claim_hash) return 0;
- bat_priv->bla.claim_hash = batadv_hash_new(128); - bat_priv->bla.backbone_hash = batadv_hash_new(32); + bat_priv->bla.claim_hash = batadv_hash_new(hash_claim_size); + bat_priv->bla.backbone_hash = batadv_hash_new(hash_backbone_size);
if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) return -ENOMEM; diff --git a/distributed-arp-table.c b/distributed-arp-table.c index 8e1d89d..7285496 100644 --- a/distributed-arp-table.c +++ b/distributed-arp-table.c @@ -653,7 +653,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv) if (bat_priv->dat.hash) return 0;
- bat_priv->dat.hash = batadv_hash_new(1024); + bat_priv->dat.hash = batadv_hash_new(1 << BATADV_DAT_HASH_BITS);
if (!bat_priv->dat.hash) return -ENOMEM; diff --git a/main.h b/main.h index f58e373..78ea63f 100644 --- a/main.h +++ b/main.h @@ -127,6 +127,14 @@ enum batadv_uev_type { #define BATADV_DAT_CANDIDATE_NOT_FOUND 0 #define BATADV_DAT_CANDIDATE_ORIG 1
+#define BATADV_BLA_CLAIM_HASH_BITS 7 +#define BATADV_BLA_BACKBONE_HASH_BITS 5 +#define BATADV_DAT_HASH_BITS 10 +#define BATADV_ORIG_HASH_BITS 10 +#define BATADV_TT_LOCAL_HASH_BITS 10 +#define BATADV_TT_GLOBAL_HASH_BITS 10 +#define BATADV_VIS_HASH_BITS 8 + /* Debug Messages */ #ifdef pr_fmt #undef pr_fmt diff --git a/originator.c b/originator.c index 8c32cf1..bd3f5d2 100644 --- a/originator.c +++ b/originator.c @@ -52,7 +52,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv) if (bat_priv->orig_hash) return 0;
- bat_priv->orig_hash = batadv_hash_new(1024); + bat_priv->orig_hash = batadv_hash_new(1 << BATADV_ORIG_HASH_BITS);
if (!bat_priv->orig_hash) goto err; diff --git a/translation-table.c b/translation-table.c index 1335294..2f20e9d 100644 --- a/translation-table.c +++ b/translation-table.c @@ -227,10 +227,12 @@ int batadv_tt_len(int changes_num)
static int batadv_tt_local_init(struct batadv_priv *bat_priv) { + uint32_t hash_size = 1 << BATADV_TT_LOCAL_HASH_BITS; + if (bat_priv->tt.local_hash) return 0;
- bat_priv->tt.local_hash = batadv_hash_new(1024); + bat_priv->tt.local_hash = batadv_hash_new(hash_size);
if (!bat_priv->tt.local_hash) return -ENOMEM; @@ -682,10 +684,12 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
static int batadv_tt_global_init(struct batadv_priv *bat_priv) { + uint32_t hash_size = 1 << BATADV_TT_GLOBAL_HASH_BITS; + if (bat_priv->tt.global_hash) return 0;
- bat_priv->tt.global_hash = batadv_hash_new(1024); + bat_priv->tt.global_hash = batadv_hash_new(hash_size);
if (!bat_priv->tt.global_hash) return -ENOMEM; diff --git a/vis.c b/vis.c index 2eb6878..30e1405 100644 --- a/vis.c +++ b/vis.c @@ -835,7 +835,7 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
spin_lock_bh(&bat_priv->vis.hash_lock);
- bat_priv->vis.hash = batadv_hash_new(256); + bat_priv->vis.hash = batadv_hash_new(1 << BATADV_VIS_HASH_BITS); if (!bat_priv->vis.hash) { pr_err("Can't initialize vis_hash\n"); goto err;
The size of a hashtable is known by the user of the hash and doesn't have to be stored inside the batadv_hashtable structure. This is an intermediate step before the size information is provided by the compiler instead of being manually added by the code.
Signed-off-by: Sven Eckelmann sven@narfation.org --- bridge_loop_avoidance.c | 32 +++++++++++++++----------- distributed-arp-table.c | 15 ++++++------ hash.c | 11 ++++----- hash.h | 13 +++++++---- originator.c | 17 +++++++------- originator.h | 2 +- routing.c | 2 +- translation-table.c | 58 ++++++++++++++++++++++++++++------------------- vis.c | 31 +++++++++++++++---------- 9 files changed, 105 insertions(+), 76 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 2fd5f09..841143d 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -141,7 +141,7 @@ static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, if (!hash) return NULL;
- index = batadv_choose_claim(data, hash->size); + index = batadv_choose_claim(data, 1 << BATADV_BLA_CLAIM_HASH_BITS); head = &hash->table[index];
rcu_read_lock(); @@ -178,6 +178,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, struct batadv_backbone_gw search_entry, *backbone_gw; struct batadv_backbone_gw *backbone_gw_tmp = NULL; int index; + uint32_t hash_size = 1 << BATADV_BLA_BACKBONE_HASH_BITS;
if (!hash) return NULL; @@ -185,7 +186,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, memcpy(search_entry.orig, addr, ETH_ALEN); search_entry.vid = vid;
- index = batadv_choose_backbone_gw(&search_entry, hash->size); + index = batadv_choose_backbone_gw(&search_entry, hash_size); head = &hash->table[index];
rcu_read_lock(); @@ -220,7 +221,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw) if (!hash) return;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -391,6 +392,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, atomic_set(&entry->refcount, 2);
hash_added = batadv_hash_add(bat_priv->bla.backbone_hash, + 1 << BATADV_BLA_BACKBONE_HASH_BITS, batadv_compare_backbone_gw, batadv_choose_backbone_gw, entry, &entry->hash_entry); @@ -468,7 +470,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, return;
hash = bat_priv->bla.claim_hash; - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -569,6 +571,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", mac, vid); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, + 1 << BATADV_BLA_CLAIM_HASH_BITS, batadv_compare_claim, batadv_choose_claim, claim, &claim->hash_entry); @@ -620,8 +623,9 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, - batadv_choose_claim, claim); + batadv_hash_remove(bat_priv->bla.claim_hash, + 1 << BATADV_BLA_CLAIM_HASH_BITS, + batadv_compare_claim, batadv_choose_claim, claim); batadv_claim_free_ref(claim); /* reference from the hash is gone */
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); @@ -961,7 +965,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) if (!hash) return;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -1015,7 +1019,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, if (!hash) return;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1075,7 +1079,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, if (!hash) return;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1139,7 +1143,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) if (!hash) goto out;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1230,8 +1234,10 @@ int batadv_bla_init(struct batadv_priv *bat_priv) return -ENOMEM;
batadv_hash_set_lock_class(bat_priv->bla.claim_hash, + 1 << BATADV_BLA_CLAIM_HASH_BITS, &batadv_claim_hash_lock_class_key); batadv_hash_set_lock_class(bat_priv->bla.backbone_hash, + 1 << BATADV_BLA_BACKBONE_HASH_BITS, &batadv_backbone_hash_lock_class_key);
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); @@ -1333,7 +1339,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) if (!hash) return 0;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1625,7 +1631,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) ntohs(bat_priv->bla.claim_dest.group)); seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n", "Client", "VID", "Originator", "CRC"); - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1671,7 +1677,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) ntohs(bat_priv->bla.claim_dest.group)); seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n", "Originator", "VID", "last seen", "CRC"); - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); diff --git a/distributed-arp-table.c b/distributed-arp-table.c index 7285496..baaec53 100644 --- a/distributed-arp-table.c +++ b/distributed-arp-table.c @@ -90,7 +90,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, if (!bat_priv->dat.hash) return;
- for (i = 0; i < bat_priv->dat.hash->size; i++) { + for (i = 0; i < 1 << BATADV_DAT_HASH_BITS; i++) { head = &bat_priv->dat.hash->table[i]; list_lock = &bat_priv->dat.hash->list_locks[i];
@@ -243,7 +243,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) if (!hash) return NULL;
- index = batadv_hash_dat(&ip, hash->size); + index = batadv_hash_dat(&ip, 1 << BATADV_DAT_HASH_BITS); head = &hash->table[index];
rcu_read_lock(); @@ -295,9 +295,10 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip, dat_entry->last_update = jiffies; atomic_set(&dat_entry->refcount, 2);
- hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat, - batadv_hash_dat, &dat_entry->ip, - &dat_entry->hash_entry); + hash_added = batadv_hash_add(bat_priv->dat.hash, + 1 << BATADV_DAT_HASH_BITS, + batadv_compare_dat, batadv_hash_dat, + &dat_entry->ip, &dat_entry->hash_entry);
if (unlikely(hash_added != 0)) { /* remove the reference for the hash */ @@ -477,7 +478,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, /* iterate over the originator list and find the node with closest * dat_address which has not been selected yet */ - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -700,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " %-7s %-13s %5s\n", "IPv4", "MAC", "last-seen");
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_DAT_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); diff --git a/hash.c b/hash.c index 15a849c..7c4edfc 100644 --- a/hash.c +++ b/hash.c @@ -21,11 +21,11 @@ #include "hash.h"
/* clears the hash */ -static void batadv_hash_init(struct batadv_hashtable *hash) +static void batadv_hash_init(struct batadv_hashtable *hash, uint32_t size) { uint32_t i;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < size; i++) { INIT_HLIST_HEAD(&hash->table[i]); spin_lock_init(&hash->list_locks[i]); } @@ -57,8 +57,7 @@ struct batadv_hashtable *batadv_hash_new(uint32_t size) if (!hash->list_locks) goto free_table;
- hash->size = size; - batadv_hash_init(hash); + batadv_hash_init(hash, size); return hash;
free_table: @@ -68,11 +67,11 @@ free_hash: return NULL; }
-void batadv_hash_set_lock_class(struct batadv_hashtable *hash, +void batadv_hash_set_lock_class(struct batadv_hashtable *hash, uint32_t size, struct lock_class_key *key) { uint32_t i;
- for (i = 0; i < hash->size; i++) + for (i = 0; i < size; i++) lockdep_set_class(&hash->list_locks[i], key); } diff --git a/hash.h b/hash.h index 977de9c..107c67e 100644 --- a/hash.h +++ b/hash.h @@ -38,14 +38,13 @@ typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *); struct batadv_hashtable { struct hlist_head *table; /* the hashtable itself with the buckets */ spinlock_t *list_locks; /* spinlock for each hash list entry */ - uint32_t size; /* size of hashtable */ };
/* allocates and clears the hash */ struct batadv_hashtable *batadv_hash_new(uint32_t size);
/* set class key for all locks */ -void batadv_hash_set_lock_class(struct batadv_hashtable *hash, +void batadv_hash_set_lock_class(struct batadv_hashtable *hash, uint32_t size, struct lock_class_key *key);
/* free only the hashtable and the hash itself. */ @@ -56,6 +55,7 @@ void batadv_hash_destroy(struct batadv_hashtable *hash); * elements, memory might be leaked. */ static inline void batadv_hash_delete(struct batadv_hashtable *hash, + uint32_t size, batadv_hashdata_free_cb free_cb, void *arg) { @@ -64,7 +64,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, spinlock_t *list_lock; /* spinlock to protect write access */ uint32_t i;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -84,6 +84,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, /** * batadv_hash_add - adds data to the hashtable * @hash: storage hash table + * @size: size of the hashtable * @compare: callback to determine if 2 hash elements are identical * @choose: callback calculating the hash index * @data: data passed to the aforementioned callbacks as argument @@ -93,6 +94,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, * and -1 on error. */ static inline int batadv_hash_add(struct batadv_hashtable *hash, + uint32_t size, batadv_hashdata_compare_cb compare, batadv_hashdata_choose_cb choose, const void *data, @@ -107,7 +109,7 @@ static inline int batadv_hash_add(struct batadv_hashtable *hash, if (!hash) goto out;
- index = choose(data, hash->size); + index = choose(data, size); head = &hash->table[index]; list_lock = &hash->list_locks[index];
@@ -138,6 +140,7 @@ out: * comparing. */ static inline void *batadv_hash_remove(struct batadv_hashtable *hash, + uint32_t size, batadv_hashdata_compare_cb compare, batadv_hashdata_choose_cb choose, void *data) @@ -147,7 +150,7 @@ static inline void *batadv_hash_remove(struct batadv_hashtable *hash, struct hlist_head *head; void *data_save = NULL;
- index = choose(data, hash->size); + index = choose(data, size); head = &hash->table[index];
spin_lock_bh(&hash->list_locks[index]); diff --git a/originator.c b/originator.c index bd3f5d2..38ae83c 100644 --- a/originator.c +++ b/originator.c @@ -171,7 +171,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
bat_priv->orig_hash = NULL;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -251,9 +251,10 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv, if (!orig_node->bcast_own_sum) goto free_bcast_own;
- hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, - batadv_choose_orig, orig_node, - &orig_node->hash_entry); + hash_added = batadv_hash_add(bat_priv->orig_hash, + 1 << BATADV_ORIG_HASH_BITS, + batadv_compare_orig, batadv_choose_orig, + orig_node, &orig_node->hash_entry); if (hash_added != 0) goto free_bcast_own_sum;
@@ -358,7 +359,7 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) return;
/* for all origins... */ - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -428,7 +429,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF", "Potential nexthops");
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -518,7 +519,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -603,7 +604,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); diff --git a/originator.h b/originator.h index 1311d39..1cad364 100644 --- a/originator.h +++ b/originator.h @@ -64,7 +64,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) if (!hash) return NULL;
- index = batadv_choose_orig(data, hash->size); + index = batadv_choose_orig(data, 1 << BATADV_ORIG_HASH_BITS); head = &hash->table[index];
rcu_read_lock(); diff --git a/routing.c b/routing.c index 1aa1722..09c40e3 100644 --- a/routing.c +++ b/routing.c @@ -45,7 +45,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) size_t word_index; uint8_t *w;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); diff --git a/translation-table.c b/translation-table.c index 2f20e9d..69822cb 100644 --- a/translation-table.c +++ b/translation-table.c @@ -56,7 +56,8 @@ static void batadv_tt_start_timer(struct batadv_priv *bat_priv) }
static struct batadv_tt_common_entry * -batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) +batadv_tt_hash_find(struct batadv_hashtable *hash, uint32_t size, + const void *data) { struct hlist_head *head; struct hlist_node *node; @@ -67,7 +68,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) if (!hash) return NULL;
- index = batadv_choose_orig(data, hash->size); + index = batadv_choose_orig(data, size); head = &hash->table[index];
rcu_read_lock(); @@ -92,7 +93,9 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data) struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_local_entry *tt_local_entry = NULL;
- tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data); + tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, + 1 << BATADV_TT_LOCAL_HASH_BITS, + data); if (tt_common_entry) tt_local_entry = container_of(tt_common_entry, struct batadv_tt_local_entry, @@ -106,7 +109,9 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data) struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_global_entry *tt_global_entry = NULL;
- tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data); + tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, + 1 << BATADV_TT_GLOBAL_HASH_BITS, + data); if (tt_common_entry) tt_global_entry = container_of(tt_common_entry, struct batadv_tt_global_entry, @@ -248,7 +253,8 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, "Deleting global tt entry %pM: %s\n", tt_global->common.addr, message);
- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, + batadv_hash_remove(bat_priv->tt.global_hash, + 1 << BATADV_TT_GLOBAL_HASH_BITS, batadv_compare_tt, batadv_choose_orig, tt_global->common.addr); batadv_tt_global_entry_free_ref(tt_global);
@@ -322,8 +328,10 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, if (batadv_compare_eth(addr, soft_iface->dev_addr)) tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
- hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, - batadv_choose_orig, &tt_local->common, + hash_added = batadv_hash_add(bat_priv->tt.local_hash, + 1 << BATADV_TT_LOCAL_HASH_BITS, + batadv_compare_tt, batadv_choose_orig, + &tt_local->common, &tt_local->common.hash_entry);
if (unlikely(hash_added != 0)) { @@ -492,7 +500,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " %-13s %-7s %-10s\n", "Client", "Flags", "Last seen");
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -635,7 +643,7 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv) spinlock_t *list_lock; /* protects write access to the hash lists */ uint32_t i;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -661,7 +669,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
hash = bat_priv->tt.local_hash;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -843,6 +851,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, spin_lock_init(&tt_global_entry->list_lock);
hash_added = batadv_hash_add(bat_priv->tt.global_hash, + 1 << BATADV_TT_GLOBAL_HASH_BITS, batadv_compare_tt, batadv_choose_orig, common, &common->hash_entry); @@ -1035,7 +1044,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC", "Flags");
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1210,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, if (!hash) return;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -1270,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) struct batadv_tt_common_entry *tt_common; struct batadv_tt_global_entry *tt_global;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -1311,7 +1320,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
hash = bat_priv->tt.global_hash;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -1403,7 +1412,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, uint32_t i; int j;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1455,7 +1464,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) uint32_t i; int j;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -1590,7 +1599,7 @@ static int batadv_tt_global_valid(const void *entry_ptr,
static struct sk_buff * batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, - struct batadv_hashtable *hash, + struct batadv_hashtable *hash, uint32_t size, struct batadv_hard_iface *primary_if, int (*valid_cb)(const void *, const void *), void *cb_data) @@ -1625,7 +1634,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, tt_count = 0;
rcu_read_lock(); - for (i = 0; i < hash->size; i++) { + for (i = 0; i < size; i++) { head = &hash->table[i];
hlist_for_each_entry_rcu(tt_common_entry, node, @@ -1737,6 +1746,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, struct batadv_tt_query_packet *tt_response; uint8_t *packet_pos; size_t len; + uint32_t hash_size = 1 << BATADV_TT_GLOBAL_HASH_BITS;
batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n", @@ -1803,7 +1813,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
skb = batadv_tt_response_fill_table(tt_len, ttvn, bat_priv->tt.global_hash, - primary_if, + hash_size, primary_if, batadv_tt_global_valid, req_dst_orig_node); if (!skb) @@ -1863,6 +1873,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, struct batadv_tt_query_packet *tt_response; uint8_t *packet_pos; size_t len; + uint32_t hash_size = 1 << BATADV_TT_LOCAL_HASH_BITS;
batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n", @@ -1920,7 +1931,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
skb = batadv_tt_response_fill_table(tt_len, ttvn, bat_priv->tt.local_hash, - primary_if, + hash_size, primary_if, batadv_tt_local_valid_entry, NULL); if (!skb) @@ -2301,7 +2312,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv) * in the given hash table and returns the number of modified entries */ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, - uint16_t flags, bool enable) + uint32_t size, uint16_t flags, bool enable) { uint32_t i; uint16_t changed_num = 0; @@ -2312,7 +2323,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, if (!hash) goto out;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < size; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -2349,7 +2360,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) if (!hash) return;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
@@ -2385,6 +2396,7 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv, return -ENOENT;
changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash, + 1 << BATADV_TT_LOCAL_HASH_BITS, BATADV_TT_CLIENT_NEW, false);
/* all reset entries have to be counted as local entries */ diff --git a/vis.c b/vis.c index 30e1405..ecde6a1 100644 --- a/vis.c +++ b/vis.c @@ -92,7 +92,7 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) if (!hash) return NULL;
- index = batadv_vis_info_choose(data, hash->size); + index = batadv_vis_info_choose(data, 1 << BATADV_VIS_HASH_BITS); head = &hash->table[index];
rcu_read_lock(); @@ -254,7 +254,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) goto out;
spin_lock_bh(&bat_priv->vis.hash_lock); - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_VIS_HASH_BITS; i++) { head = &hash->table[i]; batadv_vis_seq_print_text_bucket(seq, head); } @@ -374,8 +374,10 @@ batadv_add_packet(struct batadv_priv *bat_priv, } } /* remove old entry */ - batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp, - batadv_vis_info_choose, old_info); + batadv_hash_remove(bat_priv->vis.hash, + 1 << BATADV_VIS_HASH_BITS, + batadv_vis_info_cmp, batadv_vis_info_choose, + old_info); batadv_send_list_del(old_info); kref_put(&old_info->refcount, batadv_free_info); } @@ -415,7 +417,9 @@ batadv_add_packet(struct batadv_priv *bat_priv, batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */ - hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp, + hash_added = batadv_hash_add(bat_priv->vis.hash, + 1 << BATADV_VIS_HASH_BITS, + batadv_vis_info_cmp, batadv_vis_info_choose, info, &info->hash_entry); if (hash_added != 0) { @@ -516,7 +520,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
packet = (struct batadv_vis_packet *)info->skb_packet->data;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -589,7 +593,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) return best_tq; }
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -628,7 +632,7 @@ next:
hash = bat_priv->tt.local_hash;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -665,7 +669,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) struct hlist_head *head; struct batadv_vis_info *info;
- for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_VIS_HASH_BITS; i++) { head = &hash->table[i];
hlist_for_each_entry_safe(info, node, node_tmp, @@ -699,7 +703,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, packet = (struct batadv_vis_packet *)info->skb_packet->data;
/* send to all routers in range. */ - for (i = 0; i < hash->size; i++) { + for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { head = &hash->table[i];
rcu_read_lock(); @@ -871,7 +875,9 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
INIT_LIST_HEAD(&bat_priv->vis.send_list);
- hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp, + hash_added = batadv_hash_add(bat_priv->vis.hash, + 1 << BATADV_VIS_HASH_BITS, + batadv_vis_info_cmp, batadv_vis_info_choose, bat_priv->vis.my_info, &bat_priv->vis.my_info->hash_entry); @@ -915,7 +921,8 @@ void batadv_vis_quit(struct batadv_priv *bat_priv)
spin_lock_bh(&bat_priv->vis.hash_lock); /* properly remove, kill timers ... */ - batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL); + batadv_hash_delete(bat_priv->vis.hash, 1 << BATADV_VIS_HASH_BITS, + batadv_free_info_ref, NULL); bat_priv->vis.hash = NULL; bat_priv->vis.my_info = NULL; spin_unlock_bh(&bat_priv->vis.hash_lock);
It is unnecessary to allocate an extra memory region for hashtables and the corresponding locks. This brings the hashes used in batman-adv slightly in the direction of the common statically sized hash table implementation. More common hashtable functionality cannot be used batman-adv-wide because the simple hashtable implementation doesn't provide bucket-based locking, and its non-locked access macros don't allow loop-flow control.
A side effect of this change is the initialization of each hashtable's array of locks with a different lock_class. This allows correct nesting of write accesses to two different hashtables without triggering a lockdep warning.
Signed-off-by: Sven Eckelmann sven@narfation.org --- Makefile.kbuild | 1 - bridge_loop_avoidance.c | 140 ++++++++++++++-------------------------- distributed-arp-table.c | 51 +++++---------- hash.c | 77 ---------------------- hash.h | 91 +++++++++++++------------- originator.c | 58 ++++++----------- originator.h | 6 +- routing.c | 6 +- translation-table.c | 163 ++++++++++++++++++++--------------------------- types.h | 22 +++++-- vis.c | 68 ++++++++------------ 11 files changed, 247 insertions(+), 436 deletions(-) delete mode 100644 hash.c
diff --git a/Makefile.kbuild b/Makefile.kbuild index e45e3b4..af2e837 100644 --- a/Makefile.kbuild +++ b/Makefile.kbuild @@ -27,7 +27,6 @@ batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o -batman-adv-y += hash.o batman-adv-y += icmp_socket.o batman-adv-y += main.o batman-adv-y += originator.o diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 841143d..1a78bac 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -131,18 +131,15 @@ static void batadv_claim_free_ref(struct batadv_claim *claim) static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, struct batadv_claim *data) { - struct batadv_hashtable *hash = bat_priv->bla.claim_hash; + struct hlist_head *hash = bat_priv->bla.claim_hash; struct hlist_head *head; struct hlist_node *node; struct batadv_claim *claim; struct batadv_claim *claim_tmp = NULL; int index;
- if (!hash) - return NULL; - - index = batadv_choose_claim(data, 1 << BATADV_BLA_CLAIM_HASH_BITS); - head = &hash->table[index]; + index = batadv_choose_claim(data, ARRAY_SIZE(bat_priv->bla.claim_hash)); + head = &hash[index];
rcu_read_lock(); hlist_for_each_entry_rcu(claim, node, head, hash_entry) { @@ -172,22 +169,19 @@ static struct batadv_backbone_gw * batadv_backbone_hash_find(struct batadv_priv *bat_priv, uint8_t *addr, short vid) { - struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; + struct hlist_head *hash = bat_priv->bla.backbone_hash; struct hlist_head *head; struct hlist_node *node; struct batadv_backbone_gw search_entry, *backbone_gw; struct batadv_backbone_gw *backbone_gw_tmp = NULL; int index; - uint32_t hash_size = 1 << BATADV_BLA_BACKBONE_HASH_BITS; - - if (!hash) - return NULL; + uint32_t hash_size = ARRAY_SIZE(bat_priv->bla.backbone_hash);
memcpy(search_entry.orig, addr, ETH_ALEN); search_entry.vid = vid;
index = batadv_choose_backbone_gw(&search_entry, hash_size); - head = &hash->table[index]; + head = &hash[index];
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { @@ -210,20 +204,19 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, static void batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw) { - struct batadv_hashtable *hash; + struct batadv_priv *bat_priv = backbone_gw->bat_priv; + struct hlist_head *hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; struct batadv_claim *claim; int i; spinlock_t *list_lock; /* protects write access to the hash lists */
- hash = backbone_gw->bat_priv->bla.claim_hash; - if (!hash) - return; + hash = bat_priv->bla.claim_hash;
- for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.claim_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->bla.claim_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(claim, node, node_tmp, @@ -392,7 +385,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, atomic_set(&entry->refcount, 2);
hash_added = batadv_hash_add(bat_priv->bla.backbone_hash, - 1 << BATADV_BLA_BACKBONE_HASH_BITS, + bat_priv->bla.backbone_hash_locks, batadv_compare_backbone_gw, batadv_choose_backbone_gw, entry, &entry->hash_entry); @@ -455,7 +448,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, { struct hlist_node *node; struct hlist_head *head; - struct batadv_hashtable *hash; + struct hlist_head *hash; struct batadv_claim *claim; struct batadv_backbone_gw *backbone_gw; int i; @@ -470,8 +463,8 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, return;
hash = bat_priv->bla.claim_hash; - for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.claim_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(claim, node, head, hash_entry) { @@ -571,7 +564,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", mac, vid); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, - 1 << BATADV_BLA_CLAIM_HASH_BITS, + bat_priv->bla.claim_hash_locks, batadv_compare_claim, batadv_choose_claim, claim, &claim->hash_entry); @@ -624,7 +617,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, mac, vid);
batadv_hash_remove(bat_priv->bla.claim_hash, - 1 << BATADV_BLA_CLAIM_HASH_BITS, + bat_priv->bla.claim_hash_locks, batadv_compare_claim, batadv_choose_claim, claim); batadv_claim_free_ref(claim); /* reference from the hash is gone */
@@ -957,17 +950,15 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) struct batadv_backbone_gw *backbone_gw; struct hlist_node *node, *node_tmp; struct hlist_head *head; - struct batadv_hashtable *hash; + struct hlist_head *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ int i;
hash = bat_priv->bla.backbone_hash; - if (!hash) - return;
- for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.backbone_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->bla.backbone_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(backbone_gw, node, node_tmp, @@ -1012,15 +1003,13 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, struct batadv_claim *claim; struct hlist_node *node; struct hlist_head *head; - struct batadv_hashtable *hash; + struct hlist_head *hash; int i;
hash = bat_priv->bla.claim_hash; - if (!hash) - return;
- for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.claim_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(claim, node, head, hash_entry) { @@ -1061,7 +1050,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, struct batadv_backbone_gw *backbone_gw; struct hlist_node *node; struct hlist_head *head; - struct batadv_hashtable *hash; + struct hlist_head *hash; __be16 group; int i;
@@ -1076,11 +1065,9 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, }
hash = bat_priv->bla.backbone_hash; - if (!hash) - return;
- for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.backbone_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { @@ -1122,7 +1109,7 @@ static void batadv_bla_periodic_work(struct work_struct *work) struct hlist_node *node; struct hlist_head *head; struct batadv_backbone_gw *backbone_gw; - struct batadv_hashtable *hash; + struct hlist_head *hash; struct batadv_hard_iface *primary_if; int i;
@@ -1140,11 +1127,9 @@ static void batadv_bla_periodic_work(struct work_struct *work) goto out;
hash = bat_priv->bla.backbone_hash; - if (!hash) - goto out;
- for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.backbone_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { @@ -1183,14 +1168,6 @@ out: batadv_bla_start_timer(bat_priv); }
-/* The hash for claim and backbone hash receive the same key because they - * are getting initialized by hash_new with the same key. Reinitializing - * them with to different keys to allow nested locking without generating - * lockdep warnings - */ -static struct lock_class_key batadv_claim_hash_lock_class_key; -static struct lock_class_key batadv_backbone_hash_lock_class_key; - /* initialize all bla structures */ int batadv_bla_init(struct batadv_priv *bat_priv) { @@ -1199,8 +1176,6 @@ int batadv_bla_init(struct batadv_priv *bat_priv) struct batadv_hard_iface *primary_if; uint16_t crc; unsigned long entrytime; - uint32_t hash_claim_size = 1 << BATADV_BLA_CLAIM_HASH_BITS; - uint32_t hash_backbone_size = 1 << BATADV_BLA_BACKBONE_HASH_BITS;
spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
@@ -1224,21 +1199,10 @@ int batadv_bla_init(struct batadv_priv *bat_priv) bat_priv->bla.bcast_duplist[i].entrytime = entrytime; bat_priv->bla.bcast_duplist_curr = 0;
- if (bat_priv->bla.claim_hash) - return 0; - - bat_priv->bla.claim_hash = batadv_hash_new(hash_claim_size); - bat_priv->bla.backbone_hash = batadv_hash_new(hash_backbone_size); - - if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) - return -ENOMEM; - - batadv_hash_set_lock_class(bat_priv->bla.claim_hash, - 1 << BATADV_BLA_CLAIM_HASH_BITS, - &batadv_claim_hash_lock_class_key); - batadv_hash_set_lock_class(bat_priv->bla.backbone_hash, - 1 << BATADV_BLA_CLAIM_HASH_BITS, - &batadv_backbone_hash_lock_class_key); + batadv_hash_init(bat_priv->bla.claim_hash, + bat_priv->bla.claim_hash_locks); + batadv_hash_init(bat_priv->bla.backbone_hash, + bat_priv->bla.backbone_hash_locks);
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1327,7 +1291,7 @@ out: */ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) { - struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; + struct hlist_head *hash = bat_priv->bla.backbone_hash; struct hlist_head *head; struct hlist_node *node; struct batadv_backbone_gw *backbone_gw; @@ -1336,11 +1300,8 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) if (!atomic_read(&bat_priv->bridge_loop_avoidance)) return 0;
- if (!hash) - return 0; - - for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.backbone_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { @@ -1409,16 +1370,9 @@ void batadv_bla_free(struct batadv_priv *bat_priv) cancel_delayed_work_sync(&bat_priv->bla.work); primary_if = batadv_primary_if_get_selected(bat_priv);
- if (bat_priv->bla.claim_hash) { - batadv_bla_purge_claims(bat_priv, primary_if, 1); - batadv_hash_destroy(bat_priv->bla.claim_hash); - bat_priv->bla.claim_hash = NULL; - } - if (bat_priv->bla.backbone_hash) { - batadv_bla_purge_backbone_gw(bat_priv, 1); - batadv_hash_destroy(bat_priv->bla.backbone_hash); - bat_priv->bla.backbone_hash = NULL; - } + batadv_bla_purge_claims(bat_priv, primary_if, 1); + batadv_bla_purge_backbone_gw(bat_priv, 1); + if (primary_if) batadv_hardif_free_ref(primary_if); } @@ -1611,7 +1565,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->bla.claim_hash; + struct hlist_head *hash = bat_priv->bla.claim_hash; struct batadv_claim *claim; struct batadv_hard_iface *primary_if; struct hlist_node *node; @@ -1631,8 +1585,8 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) ntohs(bat_priv->bla.claim_dest.group)); seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n", "Client", "VID", "Originator", "CRC"); - for (i = 0; i < 1 << BATADV_BLA_CLAIM_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.claim_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(claim, node, head, hash_entry) { @@ -1656,7 +1610,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; + struct hlist_head *hash = bat_priv->bla.backbone_hash; struct batadv_backbone_gw *backbone_gw; struct batadv_hard_iface *primary_if; struct hlist_node *node; @@ -1677,8 +1631,8 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) ntohs(bat_priv->bla.claim_dest.group)); seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n", "Originator", "VID", "last seen", "CRC"); - for (i = 0; i < 1 << BATADV_BLA_BACKBONE_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->bla.backbone_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { diff --git a/distributed-arp-table.c b/distributed-arp-table.c index baaec53..b4f8eef 100644 --- a/distributed-arp-table.c +++ b/distributed-arp-table.c @@ -87,12 +87,9 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, struct hlist_head *head; uint32_t i;
- if (!bat_priv->dat.hash) - return; - - for (i = 0; i < 1 << BATADV_DAT_HASH_BITS; i++) { - head = &bat_priv->dat.hash->table[i]; - list_lock = &bat_priv->dat.hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->dat.hash); i++) { + head = &bat_priv->dat.hash[i]; + list_lock = &bat_priv->dat.hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(dat_entry, node, node_tmp, head, @@ -237,14 +234,11 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) struct hlist_head *head; struct hlist_node *node; struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL; - struct batadv_hashtable *hash = bat_priv->dat.hash; + struct hlist_head *hash = bat_priv->dat.hash; uint32_t index;
- if (!hash) - return NULL; - - index = batadv_hash_dat(&ip, 1 << BATADV_DAT_HASH_BITS); - head = &hash->table[index]; + index = batadv_hash_dat(&ip, ARRAY_SIZE(bat_priv->dat.hash)); + head = &hash[index];
rcu_read_lock(); hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { @@ -296,7 +290,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip, atomic_set(&dat_entry->refcount, 2);
hash_added = batadv_hash_add(bat_priv->dat.hash, - 1 << BATADV_DAT_HASH_BITS, + bat_priv->dat.hash_locks, batadv_compare_dat, batadv_hash_dat, &dat_entry->ip, &dat_entry->hash_entry);
@@ -465,7 +459,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, { batadv_dat_addr_t max = 0, tmp_max = 0; struct batadv_orig_node *orig_node, *max_orig_node = NULL; - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; int i; @@ -478,8 +472,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, /* iterate over the originator list and find the node with closest * dat_address which has not been selected yet */ - for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { @@ -533,9 +527,6 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; struct batadv_dat_candidate *res;
- if (!bat_priv->orig_hash) - return NULL; - res = kmalloc(BATADV_DAT_CANDIDATES_NUM * sizeof(*res), GFP_ATOMIC); if (!res) return NULL; @@ -635,14 +626,7 @@ out: */ static void batadv_dat_hash_free(struct batadv_priv *bat_priv) { - if (!bat_priv->dat.hash) - return; - __batadv_dat_purge(bat_priv, NULL); - - batadv_hash_destroy(bat_priv->dat.hash); - - bat_priv->dat.hash = NULL; }
/** @@ -651,13 +635,8 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv) */ int batadv_dat_init(struct batadv_priv *bat_priv) { - if (bat_priv->dat.hash) - return 0; - - bat_priv->dat.hash = batadv_hash_new(1 << BATADV_DAT_HASH_BITS); - - if (!bat_priv->dat.hash) - return -ENOMEM; + batadv_hash_init(bat_priv->dat.hash, + bat_priv->dat.hash_locks);
batadv_dat_start_timer(bat_priv);
@@ -684,7 +663,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->dat.hash; + struct hlist_head *hash = bat_priv->dat.hash; struct batadv_dat_entry *dat_entry; struct batadv_hard_iface *primary_if; struct hlist_node *node; @@ -701,8 +680,8 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " %-7s %-13s %5s\n", "IPv4", "MAC", "last-seen");
- for (i = 0; i < 1 << BATADV_DAT_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->dat.hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { diff --git a/hash.c b/hash.c deleted file mode 100644 index 7c4edfc..0000000 --- a/hash.c +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: - * - * Simon Wunderlich, Marek Lindner - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA - */ - -#include "main.h" -#include "hash.h" - -/* clears the hash */ -static void batadv_hash_init(struct batadv_hashtable *hash, uint32_t size) -{ - uint32_t i; - - for (i = 0; i < size; i++) { - INIT_HLIST_HEAD(&hash->table[i]); - spin_lock_init(&hash->list_locks[i]); - } -} - -/* free only the hashtable and the hash itself. 
*/ -void batadv_hash_destroy(struct batadv_hashtable *hash) -{ - kfree(hash->list_locks); - kfree(hash->table); - kfree(hash); -} - -/* allocates and clears the hash */ -struct batadv_hashtable *batadv_hash_new(uint32_t size) -{ - struct batadv_hashtable *hash; - - hash = kmalloc(sizeof(*hash), GFP_ATOMIC); - if (!hash) - return NULL; - - hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC); - if (!hash->table) - goto free_hash; - - hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size, - GFP_ATOMIC); - if (!hash->list_locks) - goto free_table; - - batadv_hash_init(hash, size); - return hash; - -free_table: - kfree(hash->table); -free_hash: - kfree(hash); - return NULL; -} - -void batadv_hash_set_lock_class(struct batadv_hashtable *hash, uint32_t size, - struct lock_class_key *key) -{ - uint32_t i; - - for (i = 0; i < size; i++) - lockdep_set_class(&hash->list_locks[i], key); -} diff --git a/hash.h b/hash.h index 107c67e..1cd3ae3 100644 --- a/hash.h +++ b/hash.h @@ -22,6 +22,15 @@
#include <linux/list.h>
+#define batadv_hash_init(hashtable, hashtable_locks) \ + do { \ + uint32_t _it; \ + for (_it = 0; _it < ARRAY_SIZE(hashtable); _it++) \ + INIT_HLIST_HEAD(&hashtable[_it]); \ + for (_it = 0; _it < ARRAY_SIZE(hashtable_locks); _it++) \ + spin_lock_init(&hashtable_locks[_it]); \ + } while (0) + /* callback to a compare function. should compare 2 element datas for their * keys, return 0 if same and not 0 if not same */ @@ -35,38 +44,28 @@ typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *, typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t); typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
-struct batadv_hashtable { - struct hlist_head *table; /* the hashtable itself with the buckets */ - spinlock_t *list_locks; /* spinlock for each hash list entry */ -}; - -/* allocates and clears the hash */ -struct batadv_hashtable *batadv_hash_new(uint32_t size); - -/* set class key for all locks */ -void batadv_hash_set_lock_class(struct batadv_hashtable *hash, uint32_t size, - struct lock_class_key *key); - -/* free only the hashtable and the hash itself. */ -void batadv_hash_destroy(struct batadv_hashtable *hash); - /* remove the hash structure. if hashdata_free_cb != NULL, this function will be * called to remove the elements inside of the hash. if you don't remove the * elements, memory might be leaked. */ -static inline void batadv_hash_delete(struct batadv_hashtable *hash, - uint32_t size, - batadv_hashdata_free_cb free_cb, - void *arg) +#define batadv_hash_delete(hash, locks, free_cb, arg) \ + _batadv_hash_delete(hash, ARRAY_SIZE(hash), locks, ARRAY_SIZE(locks), \ + free_cb, arg) + +static inline void _batadv_hash_delete(struct hlist_head *hash, + uint32_t hash_size, spinlock_t *locks, + uint32_t lock_size, + batadv_hashdata_free_cb free_cb, + void *arg) { struct hlist_head *head; struct hlist_node *node, *node_tmp; spinlock_t *list_lock; /* spinlock to protect write access */ uint32_t i;
- for (i = 0; i < size; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < hash_size; i++) { + head = &hash[i]; + list_lock = &locks[i % lock_size];
spin_lock_bh(list_lock); hlist_for_each_safe(node, node_tmp, head) { @@ -77,14 +76,11 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, } spin_unlock_bh(list_lock); } - - batadv_hash_destroy(hash); }
/** * batadv_hash_add - adds data to the hashtable * @hash: storage hash table - * @size: size of the hashtable * @compare: callback to determine if 2 hash elements are identical * @choose: callback calculating the hash index * @data: data passed to the aforementioned callbacks as argument @@ -93,12 +89,16 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, * Returns 0 on success, 1 if the element already is in the hash * and -1 on error. */ -static inline int batadv_hash_add(struct batadv_hashtable *hash, - uint32_t size, - batadv_hashdata_compare_cb compare, - batadv_hashdata_choose_cb choose, - const void *data, - struct hlist_node *data_node) +#define batadv_hash_add(hash, locks, compare, choose, data, data_node) \ + _batadv_hash_add(hash, ARRAY_SIZE(hash), locks, ARRAY_SIZE(locks), \ + compare, choose, data, data_node) + +static inline int _batadv_hash_add(struct hlist_head *hash, uint32_t hash_size, + spinlock_t *locks, uint32_t lock_size, + batadv_hashdata_compare_cb compare, + batadv_hashdata_choose_cb choose, + const void *data, + struct hlist_node *data_node) { uint32_t index; int ret = -1; @@ -109,9 +109,9 @@ static inline int batadv_hash_add(struct batadv_hashtable *hash, if (!hash) goto out;
- index = choose(data, size); - head = &hash->table[index]; - list_lock = &hash->list_locks[index]; + index = choose(data, hash_size); + head = &hash[index]; + list_lock = &locks[index];
spin_lock_bh(list_lock);
@@ -139,21 +139,26 @@ out: * structure you use with just the key filled, we just need the key for * comparing. */ -static inline void *batadv_hash_remove(struct batadv_hashtable *hash, - uint32_t size, - batadv_hashdata_compare_cb compare, - batadv_hashdata_choose_cb choose, - void *data) +#define batadv_hash_remove(hash, locks, compare, choose, data) \ + _batadv_hash_remove(hash, ARRAY_SIZE(hash), locks, ARRAY_SIZE(locks), \ + compare, choose, data) + +static inline void *_batadv_hash_remove(struct hlist_head *hash, + uint32_t hash_size, spinlock_t *locks, + uint32_t lock_size, + batadv_hashdata_compare_cb compare, + batadv_hashdata_choose_cb choose, + void *data) { uint32_t index; struct hlist_node *node; struct hlist_head *head; void *data_save = NULL;
- index = choose(data, size); - head = &hash->table[index]; + index = choose(data, hash_size); + head = &hash[index];
- spin_lock_bh(&hash->list_locks[index]); + spin_lock_bh(&locks[index]); hlist_for_each(node, head) { if (!compare(node, data)) continue; @@ -162,7 +167,7 @@ static inline void *batadv_hash_remove(struct batadv_hashtable *hash, hlist_del_rcu(node); break; } - spin_unlock_bh(&hash->list_locks[index]); + spin_unlock_bh(&locks[index]);
return data_save; } diff --git a/originator.c b/originator.c index 38ae83c..2145005 100644 --- a/originator.c +++ b/originator.c @@ -49,19 +49,11 @@ static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
int batadv_originator_init(struct batadv_priv *bat_priv) { - if (bat_priv->orig_hash) - return 0; - - bat_priv->orig_hash = batadv_hash_new(1 << BATADV_ORIG_HASH_BITS); - - if (!bat_priv->orig_hash) - goto err; + batadv_hash_init(bat_priv->orig_hash, + bat_priv->orig_hash_locks);
batadv_start_purge_timer(bat_priv); return 0; - -err: - return -ENOMEM; }
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node) @@ -157,23 +149,18 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
void batadv_originator_free(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct batadv_orig_node *orig_node; uint32_t i;
- if (!hash) - return; - cancel_delayed_work_sync(&bat_priv->orig_work);
- bat_priv->orig_hash = NULL; - - for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->orig_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node, node_tmp, @@ -184,8 +171,6 @@ void batadv_originator_free(struct batadv_priv *bat_priv) } spin_unlock_bh(list_lock); } - - batadv_hash_destroy(hash); }
/* this function finds or creates an originator entry for the given @@ -252,7 +237,7 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv, goto free_bcast_own;
hash_added = batadv_hash_add(bat_priv->orig_hash, - 1 << BATADV_ORIG_HASH_BITS, + bat_priv->orig_hash_locks, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) @@ -348,20 +333,17 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
static void _batadv_purge_orig(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct batadv_orig_node *orig_node; uint32_t i;
- if (!hash) - return; - /* for all origins... */ - for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->orig_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node, node_tmp, @@ -406,7 +388,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; struct batadv_hard_iface *primary_if; @@ -429,8 +411,8 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF", "Potential nexthops");
- for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { @@ -509,7 +491,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, int max_if_num) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct batadv_orig_node *orig_node; @@ -519,8 +501,8 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { @@ -593,7 +575,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, int max_if_num) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct batadv_hard_iface *hard_iface_tmp; @@ -604,8 +586,8 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { diff --git a/originator.h b/originator.h index 1cad364..0661479 100644 --- a/originator.h +++ b/originator.h @@ -55,7 +55,7 @@ static inline uint32_t batadv_choose_orig(const void *data, uint32_t size) static inline struct batadv_orig_node * batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) { - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_head *head; struct hlist_node *node; struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; @@ -64,8 +64,8 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) if (!hash) return NULL;
- index = batadv_choose_orig(data, 1 << BATADV_ORIG_HASH_BITS); - head = &hash->table[index]; + index = batadv_choose_orig(data, ARRAY_SIZE(bat_priv->orig_hash)); + head = &hash[index];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { diff --git a/routing.c b/routing.c index 09c40e3..95cd435 100644 --- a/routing.c +++ b/routing.c @@ -36,7 +36,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct batadv_orig_node *orig_node; @@ -45,8 +45,8 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) size_t word_index; uint8_t *w;
- for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { diff --git a/translation-table.c b/translation-table.c index 69822cb..dd821ee 100644 --- a/translation-table.c +++ b/translation-table.c @@ -55,9 +55,11 @@ static void batadv_tt_start_timer(struct batadv_priv *bat_priv) msecs_to_jiffies(5000)); }
+#define batadv_tt_hash_find(hash, data) \ + _batadv_tt_hash_find(hash, ARRAY_SIZE(hash), data) + static struct batadv_tt_common_entry * -batadv_tt_hash_find(struct batadv_hashtable *hash, uint32_t size, - const void *data) +_batadv_tt_hash_find(struct hlist_head *hash, uint32_t size, const void *data) { struct hlist_head *head; struct hlist_node *node; @@ -69,7 +71,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, uint32_t size, return NULL;
index = batadv_choose_orig(data, size); - head = &hash->table[index]; + head = &hash[index];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { @@ -94,7 +96,6 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data) struct batadv_tt_local_entry *tt_local_entry = NULL;
tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, - 1 << BATADV_TT_LOCAL_HASH_BITS, data); if (tt_common_entry) tt_local_entry = container_of(tt_common_entry, @@ -110,7 +111,6 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data) struct batadv_tt_global_entry *tt_global_entry = NULL;
tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, - 1 << BATADV_TT_GLOBAL_HASH_BITS, data); if (tt_common_entry) tt_global_entry = container_of(tt_common_entry, @@ -232,15 +232,8 @@ int batadv_tt_len(int changes_num)
static int batadv_tt_local_init(struct batadv_priv *bat_priv) { - uint32_t hash_size = 1 << BATADV_TT_LOCAL_HASH_BITS; - - if (bat_priv->tt.local_hash) - return 0; - - bat_priv->tt.local_hash = batadv_hash_new(hash_size); - - if (!bat_priv->tt.local_hash) - return -ENOMEM; + batadv_hash_init(bat_priv->tt.local_hash, + bat_priv->tt.local_hash_locks);
return 0; } @@ -254,7 +247,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, tt_global->common.addr, message);
batadv_hash_remove(bat_priv->tt.global_hash, - 1 << BATADV_TT_GLOBAL_HASH_BITS, batadv_compare_tt, + bat_priv->tt.global_hash_locks, batadv_compare_tt, batadv_choose_orig, tt_global->common.addr); batadv_tt_global_entry_free_ref(tt_global);
@@ -329,7 +322,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
hash_added = batadv_hash_add(bat_priv->tt.local_hash, - 1 << BATADV_TT_LOCAL_HASH_BITS, + bat_priv->tt.local_hash_locks, batadv_compare_tt, batadv_choose_orig, &tt_local->common, &tt_local->common.hash_entry); @@ -478,7 +471,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->tt.local_hash; + struct hlist_head *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_local_entry *tt_local; struct batadv_hard_iface *primary_if; @@ -500,8 +493,8 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " %-13s %-7s %-10s\n", "Client", "Flags", "Last seen");
- for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, @@ -638,14 +631,14 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
static void batadv_tt_local_purge(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash = bat_priv->tt.local_hash; + struct hlist_head *hash = bat_priv->tt.local_hash; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ uint32_t i;
- for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->tt.local_hash_locks[i];
spin_lock_bh(list_lock); batadv_tt_local_purge_list(bat_priv, head); @@ -656,7 +649,7 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash; + struct hlist_head *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_local_entry *tt_local; @@ -664,14 +657,11 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) struct hlist_head *head; uint32_t i;
- if (!bat_priv->tt.local_hash) - return; - hash = bat_priv->tt.local_hash;
- for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->tt.local_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, @@ -684,23 +674,12 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) } spin_unlock_bh(list_lock); } - - batadv_hash_destroy(hash); - - bat_priv->tt.local_hash = NULL; }
static int batadv_tt_global_init(struct batadv_priv *bat_priv) { - uint32_t hash_size = 1 << BATADV_TT_GLOBAL_HASH_BITS; - - if (bat_priv->tt.global_hash) - return 0; - - bat_priv->tt.global_hash = batadv_hash_new(hash_size); - - if (!bat_priv->tt.global_hash) - return -ENOMEM; + batadv_hash_init(bat_priv->tt.global_hash, + bat_priv->tt.global_hash_locks);
return 0; } @@ -851,7 +830,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, spin_lock_init(&tt_global_entry->list_lock);
hash_added = batadv_hash_add(bat_priv->tt.global_hash, - 1 << BATADV_TT_GLOBAL_HASH_BITS, + bat_priv->tt.global_hash_locks, batadv_compare_tt, batadv_choose_orig, common, &common->hash_entry); @@ -1025,7 +1004,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->tt.global_hash; + struct hlist_head *hash = bat_priv->tt.global_hash; struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_global_entry *tt_global; struct batadv_hard_iface *primary_if; @@ -1044,8 +1023,8 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC", "Flags");
- for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, @@ -1211,17 +1190,14 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, struct batadv_tt_global_entry *tt_global; struct batadv_tt_common_entry *tt_common_entry; uint32_t i; - struct batadv_hashtable *hash = bat_priv->tt.global_hash; + struct hlist_head *hash = bat_priv->tt.global_hash; struct hlist_node *node, *safe; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */
- if (!hash) - return; - - for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->tt.global_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node, safe, @@ -1270,7 +1246,7 @@ static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
static void batadv_tt_global_purge(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash = bat_priv->tt.global_hash; + struct hlist_head *hash = bat_priv->tt.global_hash; struct hlist_head *head; struct hlist_node *node, *node_tmp; spinlock_t *list_lock; /* protects write access to the hash lists */ @@ -1279,9 +1255,9 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) struct batadv_tt_common_entry *tt_common; struct batadv_tt_global_entry *tt_global;
- for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->tt.global_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common, node, node_tmp, head, @@ -1307,7 +1283,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash; + struct hlist_head *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_global_entry *tt_global; @@ -1315,14 +1291,11 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) struct hlist_head *head; uint32_t i;
- if (!bat_priv->tt.global_hash) - return; - hash = bat_priv->tt.global_hash;
- for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->tt.global_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, @@ -1335,10 +1308,6 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) } spin_unlock_bh(list_lock); } - - batadv_hash_destroy(hash); - - bat_priv->tt.global_hash = NULL; }
static bool @@ -1404,7 +1373,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { uint16_t total = 0, total_one; - struct batadv_hashtable *hash = bat_priv->tt.global_hash; + struct hlist_head *hash = bat_priv->tt.global_hash; struct batadv_tt_common_entry *tt_common; struct batadv_tt_global_entry *tt_global; struct hlist_node *node; @@ -1412,8 +1381,8 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, uint32_t i; int j;
- for (i = 0; i < 1 << BATADV_TT_GLOBAL_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { @@ -1457,15 +1426,15 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) { uint16_t total = 0, total_one; - struct batadv_hashtable *hash = bat_priv->tt.local_hash; + struct hlist_head *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common; struct hlist_node *node; struct hlist_head *head; uint32_t i; int j;
- for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { @@ -1599,7 +1568,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
static struct sk_buff * batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, - struct batadv_hashtable *hash, uint32_t size, + struct hlist_head *hash, uint32_t hash_size, + spinlock_t *locks, uint32_t lock_size, struct batadv_hard_iface *primary_if, int (*valid_cb)(const void *, const void *), void *cb_data) @@ -1634,8 +1604,8 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, tt_count = 0;
rcu_read_lock(); - for (i = 0; i < size; i++) { - head = &hash->table[i]; + for (i = 0; i < hash_size; i++) { + head = &hash[i];
hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { @@ -1746,7 +1716,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, struct batadv_tt_query_packet *tt_response; uint8_t *packet_pos; size_t len; - uint32_t hash_size = 1 << BATADV_TT_GLOBAL_HASH_BITS; + spinlock_t *locks; /* hash_locks */ + uint32_t hash_size, lock_size;
batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n", @@ -1810,10 +1781,14 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size); tt_len *= sizeof(struct batadv_tt_change); ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); + locks = bat_priv->tt.global_hash_locks; + hash_size = ARRAY_SIZE(bat_priv->tt.global_hash); + lock_size = ARRAY_SIZE(bat_priv->tt.global_hash_locks);
skb = batadv_tt_response_fill_table(tt_len, ttvn, bat_priv->tt.global_hash, - hash_size, primary_if, + hash_size, locks, lock_size, + primary_if, batadv_tt_global_valid, req_dst_orig_node); if (!skb) @@ -1873,7 +1848,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, struct batadv_tt_query_packet *tt_response; uint8_t *packet_pos; size_t len; - uint32_t hash_size = 1 << BATADV_TT_LOCAL_HASH_BITS; + spinlock_t *locks; /* hash_locks */ + uint32_t hash_size, lock_size;
batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n", @@ -1928,10 +1904,14 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num); tt_len *= sizeof(struct batadv_tt_change); ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); + locks = bat_priv->tt.local_hash_locks; + hash_size = ARRAY_SIZE(bat_priv->tt.local_hash); + lock_size = ARRAY_SIZE(bat_priv->tt.local_hash_locks);
skb = batadv_tt_response_fill_table(tt_len, ttvn, bat_priv->tt.local_hash, - hash_size, primary_if, + hash_size, locks, lock_size, + primary_if, batadv_tt_local_valid_entry, NULL); if (!skb) @@ -2311,8 +2291,8 @@ void batadv_tt_free(struct batadv_priv *bat_priv) /* This function will enable or disable the specified flags for all the entries * in the given hash table and returns the number of modified entries */ -static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, - uint32_t size, uint16_t flags, bool enable) +static uint16_t batadv_tt_set_flags(struct hlist_head *hash, uint32_t size, + uint16_t flags, bool enable) { uint32_t i; uint16_t changed_num = 0; @@ -2324,7 +2304,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, goto out;
for (i = 0; i < size; i++) { - head = &hash->table[i]; + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, @@ -2349,7 +2329,7 @@ out: /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash = bat_priv->tt.local_hash; + struct hlist_head *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common; struct batadv_tt_local_entry *tt_local; struct hlist_node *node, *node_tmp; @@ -2357,12 +2337,9 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) spinlock_t *list_lock; /* protects write access to the hash lists */ uint32_t i;
- if (!hash) - return; - - for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { - head = &hash->table[i]; - list_lock = &hash->list_locks[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { + head = &hash[i]; + list_lock = &bat_priv->tt.local_hash_locks[i];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common, node, node_tmp, head, @@ -2396,7 +2373,7 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv, return -ENOENT;
changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash, - 1 << BATADV_TT_LOCAL_HASH_BITS, + ARRAY_SIZE(bat_priv->tt.local_hash), BATADV_TT_CLIENT_NEW, false);
/* all reset entries have to be counted as local entries */ diff --git a/types.h b/types.h index ae9ac9a..a0e115a 100644 --- a/types.h +++ b/types.h @@ -206,8 +206,10 @@ struct batadv_priv_tt { atomic_t ogm_append_cnt; atomic_t local_changes; struct list_head changes_list; - struct batadv_hashtable *local_hash; - struct batadv_hashtable *global_hash; + struct hlist_head local_hash[1 << BATADV_TT_LOCAL_HASH_BITS]; + spinlock_t local_hash_locks[1 << BATADV_TT_LOCAL_HASH_BITS]; + struct hlist_head global_hash[1 << BATADV_TT_GLOBAL_HASH_BITS]; + spinlock_t global_hash_locks[1 << BATADV_TT_GLOBAL_HASH_BITS]; struct list_head req_list; struct list_head roam_list; spinlock_t changes_list_lock; /* protects changes */ @@ -224,8 +226,10 @@ struct batadv_priv_tt { #ifdef CONFIG_BATMAN_ADV_BLA struct batadv_priv_bla { atomic_t num_requests; /* number of bla requests in flight */ - struct batadv_hashtable *claim_hash; - struct batadv_hashtable *backbone_hash; + struct hlist_head claim_hash[1 << BATADV_BLA_CLAIM_HASH_BITS]; + spinlock_t claim_hash_locks[1 << BATADV_BLA_CLAIM_HASH_BITS]; + struct hlist_head backbone_hash[1 << BATADV_BLA_BACKBONE_HASH_BITS]; + spinlock_t backbone_hash_locks[1 << BATADV_BLA_BACKBONE_HASH_BITS]; struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE]; int bcast_duplist_curr; /* protects bcast_duplist and bcast_duplist_curr */ @@ -244,7 +248,8 @@ struct batadv_priv_gw {
struct batadv_priv_vis { struct list_head send_list; - struct batadv_hashtable *hash; + struct hlist_head hash[1 << BATADV_VIS_HASH_BITS]; + spinlock_t hash_locks[1 << BATADV_VIS_HASH_BITS]; spinlock_t hash_lock; /* protects hash */ spinlock_t list_lock; /* protects info::recv_list */ struct delayed_work work; @@ -255,12 +260,14 @@ struct batadv_priv_vis { * struct batadv_priv_dat - per mesh interface DAT private data * @addr: node DAT address * @hash: hashtable representing the local ARP cache + * @hash_lock: locks for each hashtable buckets * @work: work queue callback item for cache purging */ #ifdef CONFIG_BATMAN_ADV_DAT struct batadv_priv_dat { batadv_dat_addr_t addr; - struct batadv_hashtable *hash; + struct hlist_head hash[1 << BATADV_DAT_HASH_BITS]; + spinlock_t hash_locks[1 << BATADV_DAT_HASH_BITS]; struct delayed_work work; }; #endif @@ -293,7 +300,8 @@ struct batadv_priv { struct dentry *debug_dir; struct hlist_head forw_bat_list; struct hlist_head forw_bcast_list; - struct batadv_hashtable *orig_hash; + struct hlist_head orig_hash[1 << BATADV_ORIG_HASH_BITS]; + spinlock_t orig_hash_locks[1 << BATADV_ORIG_HASH_BITS]; spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ struct delayed_work orig_work; diff --git a/vis.c b/vis.c index ecde6a1..54401e3 100644 --- a/vis.c +++ b/vis.c @@ -83,17 +83,14 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size) static struct batadv_vis_info * batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) { - struct batadv_hashtable *hash = bat_priv->vis.hash; + struct hlist_head *hash = bat_priv->vis.hash; struct hlist_head *head; struct hlist_node *node; struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; uint32_t index;
- if (!hash) - return NULL; - - index = batadv_vis_info_choose(data, 1 << BATADV_VIS_HASH_BITS); - head = &hash->table[index]; + index = batadv_vis_info_choose(data, ARRAY_SIZE(bat_priv->vis.hash)); + head = &hash[index];
rcu_read_lock(); hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { @@ -241,7 +238,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) struct hlist_head *head; struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); - struct batadv_hashtable *hash = bat_priv->vis.hash; + struct hlist_head *hash = bat_priv->vis.hash; uint32_t i; int ret = 0; int vis_server = atomic_read(&bat_priv->vis_mode); @@ -254,8 +251,8 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) goto out;
spin_lock_bh(&bat_priv->vis.hash_lock); - for (i = 0; i < 1 << BATADV_VIS_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->vis.hash); i++) { + head = &hash[i]; batadv_vis_seq_print_text_bucket(seq, head); } spin_unlock_bh(&bat_priv->vis.hash_lock); @@ -342,9 +339,6 @@ batadv_add_packet(struct batadv_priv *bat_priv, size_t max_entries;
*is_new = 0; - /* sanity check */ - if (!bat_priv->vis.hash) - return NULL;
/* see if the packet is already in vis_hash */ search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet)); @@ -375,7 +369,7 @@ batadv_add_packet(struct batadv_priv *bat_priv, } /* remove old entry */ batadv_hash_remove(bat_priv->vis.hash, - 1 << BATADV_VIS_HASH_BITS, + bat_priv->vis.hash_locks, batadv_vis_info_cmp, batadv_vis_info_choose, old_info); batadv_send_list_del(old_info); @@ -418,7 +412,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
/* try to add it */ hash_added = batadv_hash_add(bat_priv->vis.hash, - 1 << BATADV_VIS_HASH_BITS, + bat_priv->vis.hash_locks, batadv_vis_info_cmp, batadv_vis_info_choose, info, &info->hash_entry); @@ -509,7 +503,7 @@ end: static int batadv_find_best_vis_server(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct batadv_neigh_node *router; struct hlist_node *node; struct hlist_head *head; @@ -520,8 +514,8 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
packet = (struct batadv_vis_packet *)info->skb_packet->data;
- for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { @@ -562,7 +556,7 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info) */ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) { - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct batadv_orig_node *orig_node; @@ -593,8 +587,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv) return best_tq; }
- for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { @@ -632,8 +626,8 @@ next:
hash = bat_priv->tt.local_hash;
- for (i = 0; i < 1 << BATADV_TT_LOCAL_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, head, @@ -664,13 +658,13 @@ unlock: static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) { uint32_t i; - struct batadv_hashtable *hash = bat_priv->vis.hash; + struct hlist_head *hash = bat_priv->vis.hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; struct batadv_vis_info *info;
- for (i = 0; i < 1 << BATADV_VIS_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->vis.hash); i++) { + head = &hash[i];
hlist_for_each_entry_safe(info, node, node_tmp, head, hash_entry) { @@ -691,7 +685,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, struct batadv_vis_info *info) { - struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct batadv_orig_node *orig_node; @@ -703,8 +697,8 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, packet = (struct batadv_vis_packet *)info->skb_packet->data;
/* send to all routers in range. */ - for (i = 0; i < 1 << BATADV_ORIG_HASH_BITS; i++) { - head = &hash->table[i]; + for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { + head = &hash[i];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { @@ -834,16 +828,10 @@ int batadv_vis_init(struct batadv_priv *bat_priv) unsigned long first_seen; struct sk_buff *tmp_skb;
- if (bat_priv->vis.hash) - return 0; - spin_lock_bh(&bat_priv->vis.hash_lock);
- bat_priv->vis.hash = batadv_hash_new(1 << BATADV_VIS_HASH_BITS); - if (!bat_priv->vis.hash) { - pr_err("Can't initialize vis_hash\n"); - goto err; - } + batadv_hash_init(bat_priv->vis.hash, + bat_priv->vis.hash_locks);
bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC); if (!bat_priv->vis.my_info) @@ -876,7 +864,7 @@ int batadv_vis_init(struct batadv_priv *bat_priv) INIT_LIST_HEAD(&bat_priv->vis.send_list);
hash_added = batadv_hash_add(bat_priv->vis.hash, - 1 << BATADV_VIS_HASH_BITS, + bat_priv->vis.hash_locks, batadv_vis_info_cmp, batadv_vis_info_choose, bat_priv->vis.my_info, @@ -914,16 +902,12 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg) /* shutdown vis-server */ void batadv_vis_quit(struct batadv_priv *bat_priv) { - if (!bat_priv->vis.hash) - return; - cancel_delayed_work_sync(&bat_priv->vis.work);
spin_lock_bh(&bat_priv->vis.hash_lock); /* properly remove, kill timers ... */ - batadv_hash_delete(bat_priv->vis.hash, 1 << BATADV_VIS_HASH_BITS, + batadv_hash_delete(bat_priv->vis.hash, bat_priv->vis.hash_locks, batadv_free_info_ref, NULL); - bat_priv->vis.hash = NULL; bat_priv->vis.my_info = NULL; spin_unlock_bh(&bat_priv->vis.hash_lock); }
The number of locks of a hash directly affects the parallelism possible when different users access independent parts of the hash. But it also affects the amount of memory used for each hash. Decoupling the hash size from the lock size makes it possible to reduce this memory consumption for hashes which don't benefit from this parallelism.
Signed-off-by: Sven Eckelmann sven@narfation.org --- bridge_loop_avoidance.c | 28 ++++++++++++++-------------- distributed-arp-table.c | 16 ++++++++-------- distributed-arp-table.h | 6 +++--- hash.h | 19 +++++++++---------- originator.c | 6 ++++-- originator.h | 13 +++++-------- translation-table.c | 22 ++++++++++++++-------- vis.c | 8 ++++---- 8 files changed, 61 insertions(+), 57 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 1a78bac..781119f 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -38,7 +38,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, struct batadv_backbone_gw *backbone_gw);
/* return the index of the claim */ -static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) +static inline uint32_t batadv_choose_claim(const void *data) { struct batadv_claim *claim = (struct batadv_claim *)data; uint32_t hash; @@ -46,12 +46,11 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) hash = jhash(&claim->addr, sizeof(claim->addr), 0); hash = jhash(&claim->vid, sizeof(claim->vid), hash);
- return hash % size; + return hash; }
/* return the index of the backbone gateway */ -static inline uint32_t batadv_choose_backbone_gw(const void *data, - uint32_t size) +static inline uint32_t batadv_choose_backbone_gw(const void *data) { struct batadv_claim *claim = (struct batadv_claim *)data; uint32_t hash; @@ -59,7 +58,7 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data, hash = jhash(&claim->addr, sizeof(claim->addr), 0); hash = jhash(&claim->vid, sizeof(claim->vid), hash);
- return hash % size; + return hash; }
@@ -136,10 +135,10 @@ static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, struct hlist_node *node; struct batadv_claim *claim; struct batadv_claim *claim_tmp = NULL; - int index; + uint32_t index;
- index = batadv_choose_claim(data, ARRAY_SIZE(bat_priv->bla.claim_hash)); - head = &hash[index]; + index = batadv_choose_claim(data); + head = &hash[index % ARRAY_SIZE(bat_priv->bla.claim_hash)];
rcu_read_lock(); hlist_for_each_entry_rcu(claim, node, head, hash_entry) { @@ -174,14 +173,13 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, struct hlist_node *node; struct batadv_backbone_gw search_entry, *backbone_gw; struct batadv_backbone_gw *backbone_gw_tmp = NULL; - int index; - uint32_t hash_size = ARRAY_SIZE(bat_priv->bla.backbone_hash); + uint32_t index;
memcpy(search_entry.orig, addr, ETH_ALEN); search_entry.vid = vid;
- index = batadv_choose_backbone_gw(&search_entry, hash_size); - head = &hash[index]; + index = batadv_choose_backbone_gw(&search_entry); + head = &hash[index % ARRAY_SIZE(bat_priv->bla.backbone_hash)];
rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { @@ -211,12 +209,13 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw) struct batadv_claim *claim; int i; spinlock_t *list_lock; /* protects write access to the hash lists */ + uint32_t locks_size = ARRAY_SIZE(bat_priv->bla.claim_hash_locks);
hash = bat_priv->bla.claim_hash;
for (i = 0; i < ARRAY_SIZE(bat_priv->bla.claim_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->bla.claim_hash_locks[i]; + list_lock = &bat_priv->bla.claim_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(claim, node, node_tmp, @@ -953,12 +952,13 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) struct hlist_head *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ int i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->bla.backbone_hash_locks);
hash = bat_priv->bla.backbone_hash;
for (i = 0; i < ARRAY_SIZE(bat_priv->bla.backbone_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->bla.backbone_hash_locks[i]; + list_lock = &bat_priv->bla.backbone_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(backbone_gw, node, node_tmp, diff --git a/distributed-arp-table.c b/distributed-arp-table.c index b4f8eef..8c8dbc5 100644 --- a/distributed-arp-table.c +++ b/distributed-arp-table.c @@ -86,10 +86,11 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv, struct hlist_node *node, *node_tmp; struct hlist_head *head; uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->dat.hash_locks);
for (i = 0; i < ARRAY_SIZE(bat_priv->dat.hash); i++) { head = &bat_priv->dat.hash[i]; - list_lock = &bat_priv->dat.hash_locks[i]; + list_lock = &bat_priv->dat.hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(dat_entry, node, node_tmp, head, @@ -197,11 +198,10 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size) /** * batadv_hash_dat - compute the hash value for an IP address * @data: data to hash - * @size: size of the hash table * * Returns the selected index in the hash table for the given data */ -static uint32_t batadv_hash_dat(const void *data, uint32_t size) +static uint32_t batadv_hash_dat(const void *data) { const unsigned char *key = data; uint32_t hash = 0; @@ -217,7 +217,7 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size) hash ^= (hash >> 11); hash += (hash << 15);
- return hash % size; + return hash; }
/** @@ -237,8 +237,8 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip) struct hlist_head *hash = bat_priv->dat.hash; uint32_t index;
- index = batadv_hash_dat(&ip, ARRAY_SIZE(bat_priv->dat.hash)); - head = &hash[index]; + index = batadv_hash_dat(&ip); + head = &hash[index % ARRAY_SIZE(bat_priv->dat.hash)];
rcu_read_lock(); hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { @@ -531,8 +531,8 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) if (!res) return NULL;
- ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, - BATADV_DAT_ADDR_MAX); + ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst); + ip_key %= BATADV_DAT_ADDR_MAX;
batadv_dbg(BATADV_DBG_DAT, bat_priv, "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst, diff --git a/distributed-arp-table.h b/distributed-arp-table.h index d060c03..642e28b 100644 --- a/distributed-arp-table.h +++ b/distributed-arp-table.h @@ -49,7 +49,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node) { uint32_t addr;
- addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX); + addr = batadv_choose_orig(orig_node->orig) % BATADV_DAT_ADDR_MAX; orig_node->dat_addr = (batadv_dat_addr_t)addr; }
@@ -64,8 +64,8 @@ batadv_dat_init_own_addr(struct batadv_priv *bat_priv, { uint32_t addr;
- addr = batadv_choose_orig(primary_if->net_dev->dev_addr, - BATADV_DAT_ADDR_MAX); + addr = batadv_choose_orig(primary_if->net_dev->dev_addr); + addr %= BATADV_DAT_ADDR_MAX;
bat_priv->dat.addr = (batadv_dat_addr_t)addr; } diff --git a/hash.h b/hash.h index 1cd3ae3..33b0be1 100644 --- a/hash.h +++ b/hash.h @@ -37,11 +37,10 @@ typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *, const void *);
-/* the hashfunction, should return an index - * based on the key in the data of the first - * argument and the size the second +/* the hashfunction, should return an unmapped index based on the key in the + * data of the first argument */ -typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t); +typedef uint32_t (*batadv_hashdata_choose_cb)(const void *); typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
/* remove the hash structure. if hashdata_free_cb != NULL, this function will be @@ -109,9 +108,9 @@ static inline int _batadv_hash_add(struct hlist_head *hash, uint32_t hash_size, if (!hash) goto out;
- index = choose(data, hash_size); - head = &hash[index]; - list_lock = &locks[index]; + index = choose(data); + head = &hash[index % hash_size]; + list_lock = &locks[index % lock_size];
spin_lock_bh(list_lock);
@@ -155,10 +154,10 @@ static inline void *_batadv_hash_remove(struct hlist_head *hash, struct hlist_head *head; void *data_save = NULL;
- index = choose(data, hash_size); - head = &hash[index]; + index = choose(data); + head = &hash[index % hash_size];
- spin_lock_bh(&locks[index]); + spin_lock_bh(&locks[index % lock_size]); hlist_for_each(node, head) { if (!compare(node, data)) continue; diff --git a/originator.c b/originator.c index 2145005..3909862 100644 --- a/originator.c +++ b/originator.c @@ -155,12 +155,13 @@ void batadv_originator_free(struct batadv_priv *bat_priv) spinlock_t *list_lock; /* spinlock to protect write access */ struct batadv_orig_node *orig_node; uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->orig_hash_locks);
cancel_delayed_work_sync(&bat_priv->orig_work);
for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->orig_hash_locks[i]; + list_lock = &bat_priv->orig_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node, node_tmp, @@ -339,11 +340,12 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) spinlock_t *list_lock; /* spinlock to protect write access */ struct batadv_orig_node *orig_node; uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->orig_hash_locks);
/* for all origins... */ for (i = 0; i < ARRAY_SIZE(bat_priv->orig_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->orig_hash_locks[i]; + list_lock = &bat_priv->orig_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node, node_tmp, diff --git a/originator.h b/originator.h index 0661479..9d188c0 100644 --- a/originator.h +++ b/originator.h @@ -44,12 +44,9 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, /* hashfunction to choose an entry in a hash table of given size * hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ -static inline uint32_t batadv_choose_orig(const void *data, uint32_t size) +static inline uint32_t batadv_choose_orig(const void *data) { - uint32_t hash; - - hash = jhash(data, ETH_ALEN, 0); - return hash % size; + return jhash(data, ETH_ALEN, 0); }
static inline struct batadv_orig_node * @@ -59,13 +56,13 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data) struct hlist_head *head; struct hlist_node *node; struct batadv_orig_node *orig_node, *orig_node_tmp = NULL; - int index; + uint32_t index;
if (!hash) return NULL;
- index = batadv_choose_orig(data, ARRAY_SIZE(bat_priv->orig_hash)); - head = &hash[index]; + index = batadv_choose_orig(data); + head = &hash[index % ARRAY_SIZE(bat_priv->orig_hash)];
rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { diff --git a/translation-table.c b/translation-table.c index dd821ee..50b3a8c 100644 --- a/translation-table.c +++ b/translation-table.c @@ -70,8 +70,8 @@ _batadv_tt_hash_find(struct hlist_head *hash, uint32_t size, const void *data) if (!hash) return NULL;
- index = batadv_choose_orig(data, size); - head = &hash[index]; + index = batadv_choose_orig(data); + head = &hash[index % size];
rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { @@ -635,10 +635,11 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv) struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->tt.local_hash_locks);
for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->tt.local_hash_locks[i]; + list_lock = &bat_priv->tt.local_hash_locks[i % locks_size];
spin_lock_bh(list_lock); batadv_tt_local_purge_list(bat_priv, head); @@ -656,12 +657,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) struct hlist_node *node, *node_tmp; struct hlist_head *head; uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->tt.local_hash_locks);
hash = bat_priv->tt.local_hash;
for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->tt.local_hash_locks[i]; + list_lock = &bat_priv->tt.local_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, @@ -1194,10 +1196,11 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, struct hlist_node *node, *safe; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ + uint32_t locks_size = ARRAY_SIZE(bat_priv->tt.global_hash_locks);
for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->tt.global_hash_locks[i]; + list_lock = &bat_priv->tt.global_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node, safe, @@ -1254,10 +1257,11 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) char *msg = NULL; struct batadv_tt_common_entry *tt_common; struct batadv_tt_global_entry *tt_global; + uint32_t locks_size = ARRAY_SIZE(bat_priv->tt.global_hash_locks);
for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->tt.global_hash_locks[i]; + list_lock = &bat_priv->tt.global_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common, node, node_tmp, head, @@ -1290,12 +1294,13 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) struct hlist_node *node, *node_tmp; struct hlist_head *head; uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->tt.global_hash_locks);
hash = bat_priv->tt.global_hash;
for (i = 0; i < ARRAY_SIZE(bat_priv->tt.global_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->tt.global_hash_locks[i]; + list_lock = &bat_priv->tt.global_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, @@ -2336,10 +2341,11 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ uint32_t i; + uint32_t locks_size = ARRAY_SIZE(bat_priv->tt.local_hash_locks);
for (i = 0; i < ARRAY_SIZE(bat_priv->tt.local_hash); i++) { head = &hash[i]; - list_lock = &bat_priv->tt.local_hash_locks[i]; + list_lock = &bat_priv->tt.local_hash_locks[i % locks_size];
spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common, node, node_tmp, head, diff --git a/vis.c b/vis.c index 54401e3..8fd03f0 100644 --- a/vis.c +++ b/vis.c @@ -68,7 +68,7 @@ static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2) /* hash function to choose an entry in a hash table of given size * hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ -static uint32_t batadv_vis_info_choose(const void *data, uint32_t size) +static uint32_t batadv_vis_info_choose(const void *data) { const struct batadv_vis_info *vis_info = data; const struct batadv_vis_packet *packet; @@ -77,7 +77,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size) packet = (struct batadv_vis_packet *)vis_info->skb_packet->data; hash = jhash(&packet->vis_orig, sizeof(packet->vis_orig), 0);
- return hash % size; + return hash; }
static struct batadv_vis_info * @@ -89,8 +89,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data) struct batadv_vis_info *vis_info, *vis_info_tmp = NULL; uint32_t index;
- index = batadv_vis_info_choose(data, ARRAY_SIZE(bat_priv->vis.hash)); - head = &hash[index]; + index = batadv_vis_info_choose(data); + head = &hash[index % ARRAY_SIZE(bat_priv->vis.hash)];
rcu_read_lock(); hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
On Mon, Nov 26, 2012 at 12:58:32PM +0100, Sven Eckelmann wrote:
The number of locks of a hash directly affects the parallelism when accesses target independent parts of the hash. But it also affects the amount of memory used for each hash. Decoupling the hash size from the lock size makes it possible to reduce this memory consumption for hashes which don't benefit from this parallelism.
Signed-off-by: Sven Eckelmann sven@narfation.org
bridge_loop_avoidance.c | 28 ++++++++++++++-------------- distributed-arp-table.c | 16 ++++++++-------- distributed-arp-table.h | 6 +++--- hash.h | 19 +++++++++---------- originator.c | 6 ++++-- originator.h | 13 +++++-------- translation-table.c | 22 ++++++++++++++-------- vis.c | 8 ++++---- 8 files changed, 61 insertions(+), 57 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 1a78bac..781119f 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -38,7 +38,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, struct batadv_backbone_gw *backbone_gw);
/* return the index of the claim */ -static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) +static inline uint32_t batadv_choose_claim(const void *data) { struct batadv_claim *claim = (struct batadv_claim *)data; uint32_t hash; @@ -46,12 +46,11 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) hash = jhash(&claim->addr, sizeof(claim->addr), 0); hash = jhash(&claim->vid, sizeof(claim->vid), hash);
- return hash % size;
- return hash;
I do not fully understand how having the mod operation inside the function would prevent the decoupling. Why do you need to move it out?
Cheers,
On Saturday 01 December 2012 17:40:45 Antonio Quartulli wrote:
On Mon, Nov 26, 2012 at 12:58:32PM +0100, Sven Eckelmann wrote:
The number of locks of a hash directly affects the parallelism when accesses target independent parts of the hash. But it also affects the amount of memory used for each hash. Decoupling the hash size from the lock size makes it possible to reduce this memory consumption for hashes which don't benefit from this parallelism.
Signed-off-by: Sven Eckelmann sven@narfation.org
bridge_loop_avoidance.c | 28 ++++++++++++++-------------- distributed-arp-table.c | 16 ++++++++-------- distributed-arp-table.h | 6 +++--- hash.h | 19 +++++++++---------- originator.c | 6 ++++-- originator.h | 13 +++++-------- translation-table.c | 22 ++++++++++++++-------- vis.c | 8 ++++---- 8 files changed, 61 insertions(+), 57 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 1a78bac..781119f 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -38,7 +38,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,> struct batadv_backbone_gw *backbone_gw);
/* return the index of the claim */
-static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) +static inline uint32_t batadv_choose_claim(const void *data)
{
struct batadv_claim *claim = (struct batadv_claim *)data; uint32_t hash;
@@ -46,12 +46,11 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)> hash = jhash(&claim->addr, sizeof(claim->addr), 0); hash = jhash(&claim->vid, sizeof(claim->vid), hash);
- return hash % size;
- return hash;
I do not fully understand how having the mod operation inside the function would prevent the decoupling. Why do you need to move it out?
Because the following calculations can produce different results:
* (i % hashsize) % locksize * (i % locksize)
Example: * (21 % 4) % 3 == 1 * 21 % 3 == 0
And of course, we could use only the first calculation. But then we would end up having more hash buckets mapped to the first part of the locks when hashsize > locksize.
Kind regards, Sven
On Sat, Dec 01, 2012 at 05:58:22PM +0100, Sven Eckelmann wrote:
On Saturday 01 December 2012 17:40:45 Antonio Quartulli wrote:
On Mon, Nov 26, 2012 at 12:58:32PM +0100, Sven Eckelmann wrote:
The number of locks of a hash directly affects the parallelism when accesses target independent parts of the hash. But it also affects the amount of memory used for each hash. Decoupling the hash size from the lock size makes it possible to reduce this memory consumption for hashes which don't benefit from this parallelism.
Signed-off-by: Sven Eckelmann sven@narfation.org
bridge_loop_avoidance.c | 28 ++++++++++++++-------------- distributed-arp-table.c | 16 ++++++++-------- distributed-arp-table.h | 6 +++--- hash.h | 19 +++++++++---------- originator.c | 6 ++++-- originator.h | 13 +++++-------- translation-table.c | 22 ++++++++++++++-------- vis.c | 8 ++++---- 8 files changed, 61 insertions(+), 57 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c index 1a78bac..781119f 100644 --- a/bridge_loop_avoidance.c +++ b/bridge_loop_avoidance.c @@ -38,7 +38,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,> struct batadv_backbone_gw *backbone_gw);
/* return the index of the claim */
-static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) +static inline uint32_t batadv_choose_claim(const void *data)
{
struct batadv_claim *claim = (struct batadv_claim *)data; uint32_t hash;
@@ -46,12 +46,11 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)> hash = jhash(&claim->addr, sizeof(claim->addr), 0); hash = jhash(&claim->vid, sizeof(claim->vid), hash);
- return hash % size;
- return hash;
I do not fully understand how having the mod operation inside the function would prevent the decoupling. Why do you need to move it out?
Because the following calculations can produce different results:
- (i % hashsize) % locksize
- (i % locksize)
Example:
- (21 % 4) % 3 == 1
- 21 % 3 == 0
And of course, we could use only the first calculation. But then we would end up having more hash buckets mapped to the first part of the locks when hashsize > locksize.
Yeah, this is what I had in mind, but I didn't think about the distribution effect. Well, even doing it your way I think the distribution is not uniform at all. But the effect is minimized with respect to the first case.
I think it is worth leaving it as you posted.
Cheers,
On Mon, Nov 26, 2012 at 12:58 PM, Sven Eckelmann sven@narfation.org wrote:
An unoptimized version of the Jenkins one-at-a-time hash function is copied all over the code wherever a hashtable is used. Instead, the optimized version shared across the whole kernel should be used to reduce code duplication and keep bugs at a single point.
Only the DAT code must use the old implementation because it is used as a distributed hash function which has to be common for all nodes.
Something similar happened here. This patchset was discussed and not rejected. But it wasn't accepted either, and as a result it didn't appear in 3.8-rc1.
b.a.t.m.a.n@lists.open-mesh.org