The following commit has been merged in the linux branch:
commit 2cb7c23024a5b440b29498adc1f46d9c3aebe81c
Merge: c47f2d31185ac7c53d5e495b0a7124602527f38d 050ada5d7cdcc7d68b630af6eb6c477011867ead
Author: Sven Eckelmann <sven.eckelmann@gmx.de>
Date:   Tue Oct 19 02:13:39 2010 +0200
Merge remote branch 'origin/next' into linux
    Conflicts:
    	drivers/staging/batman-adv/Makefile.kbuild
    	drivers/staging/batman-adv/bat_printk.c
    	drivers/staging/batman-adv/compat.h
    	drivers/staging/batman-adv/sysfs-class-net-mesh
diff --combined drivers/staging/batman-adv/Makefile
index 4b5c434,0000000..7892428
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/Makefile
+++ b/drivers/staging/batman-adv/Makefile
@@@ -1,22 -1,0 +1,22 @@@
+#
+# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA
+#
+
+obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
- batman-adv-objs := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o unicast.o
++batman-adv-y := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o unicast.o
diff --combined drivers/staging/batman-adv/aggregation.c
index 46b9c2b,08624d4..08624d4
--- a/drivers/staging/batman-adv/aggregation.c
+++ b/drivers/staging/batman-adv/aggregation.c
@@@ -123,8 -123,14 +123,14 @@@ static void new_aggregated_packet(unsig
  		return;
  	}
  
- 	forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- 					      sizeof(struct ethhdr));
+ 	if ((atomic_read(&bat_priv->aggregation_enabled)) &&
+ 	    (packet_len < MAX_AGGREGATION_BYTES))
+ 		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
+ 						      sizeof(struct ethhdr));
+ 	else
+ 		forw_packet_aggr->skb = dev_alloc_skb(packet_len +
+ 						      sizeof(struct ethhdr));
+ 
  	if (!forw_packet_aggr->skb) {
  		if (!own_packet)
  			atomic_inc(&bat_priv->batman_queue_left);
@@@ -251,9 -257,7 +257,7 @@@ void receive_aggr_bat_packet(struct eth
  	batman_packet = (struct batman_packet *)packet_buff;
  
- 	while (aggregated_packet(buff_pos, packet_len,
- 				 batman_packet->num_hna)) {
- 
+ 	do {
  		/* network to host order for our 32bit seqno, and the
  		   orig_interval. */
  		batman_packet->seqno = ntohl(batman_packet->seqno);
@@@ -266,5 -270,6 +270,6 @@@
  		buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
  		batman_packet = (struct batman_packet *)
  			(packet_buff + buff_pos);
- 	}
+ 	} while (aggregated_packet(buff_pos, packet_len,
+ 				   batman_packet->num_hna));
  }
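The while-to-do/while conversion above changes receive_aggr_bat_packet() so the first batman packet, which is always present, is processed before aggregated_packet() is asked whether another one follows. A minimal userspace sketch of the same walk (the struct ogm, OGM_LEN/ADDR_LEN values and count_packets() name are stand-ins of my own, not the driver's struct batman_packet and helpers):

#include <stdint.h>
#include <stdio.h>

#define OGM_LEN			4	/* stand-in for BAT_PACKET_LEN */
#define ADDR_LEN		6	/* stand-in for ETH_ALEN */
#define MAX_AGGREGATION_BYTES	512

struct ogm {
	uint8_t num_hna;	/* number of appended HNA addresses */
	uint8_t pad[3];
};

/* would one more packet starting at buff_pos still fit? */
static int aggregated(int buff_pos, int packet_len, int num_hna)
{
	int next_pos = buff_pos + OGM_LEN + num_hna * ADDR_LEN;

	return (next_pos <= packet_len) &&
	       (next_pos <= MAX_AGGREGATION_BYTES);
}

/* walk an aggregated buffer; the first packet needs no pre-check */
static int count_packets(const unsigned char *buff, int packet_len)
{
	const struct ogm *ogm = (const struct ogm *)buff;
	int buff_pos = 0, count = 0;

	do {
		count++;
		buff_pos += OGM_LEN + ogm->num_hna * ADDR_LEN;
		ogm = (const struct ogm *)(buff + buff_pos);
		/* only dereference the next header after checking it fits */
	} while (buff_pos + OGM_LEN <= packet_len &&
		 aggregated(buff_pos, packet_len, ogm->num_hna));

	return count;
}

int main(void)
{
	unsigned char buff[2 * OGM_LEN] = {0};	/* two back-to-back OGMs */

	printf("%d\n", count_packets(buff, sizeof(buff)));	/* prints 2 */
	return 0;
}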
diff --combined drivers/staging/batman-adv/originator.c
index c530df1,0000000..865211d
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@@ -1,537 -1,0 +1,537 @@@
+/*
+ * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+/* increase the reference counter for this originator */
+
+#include "main.h"
+#include "originator.h"
+#include "hash.h"
+#include "translation-table.h"
+#include "routing.h"
+#include "hard-interface.h"
+#include "unicast.h"
+
+static void purge_orig(struct work_struct *work);
+
+static void start_purge_timer(struct bat_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
+}
+
+int originator_init(struct bat_priv *bat_priv)
+{
+	unsigned long flags;
+	if (bat_priv->orig_hash)
+		return 1;
+
+	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+	bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
+
+	if (!bat_priv->orig_hash)
+		goto err;
+
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+	start_purge_timer(bat_priv);
+	return 1;
+
+err:
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+	return 0;
+}
+
+struct neigh_node *
+create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
+		uint8_t *neigh, struct batman_if *if_incoming)
+{
+	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+	struct neigh_node *neigh_node;
+
+	bat_dbg(DBG_BATMAN, bat_priv,
+		"Creating new last-hop neighbor of originator\n");
+
+	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
+	if (!neigh_node)
+		return NULL;
+
+	INIT_LIST_HEAD(&neigh_node->list);
+
+	memcpy(neigh_node->addr, neigh, ETH_ALEN);
+	neigh_node->orig_node = orig_neigh_node;
+	neigh_node->if_incoming = if_incoming;
+
+	list_add_tail(&neigh_node->list, &orig_node->neigh_list);
+	return neigh_node;
+}
+
+static void free_orig_node(void *data, void *arg)
+{
+	struct list_head *list_pos, *list_pos_tmp;
+	struct neigh_node *neigh_node;
+	struct orig_node *orig_node = (struct orig_node *)data;
+	struct bat_priv *bat_priv = (struct bat_priv *)arg;
+
+	/* for all neighbors towards this originator ... */
+	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
+		neigh_node = list_entry(list_pos, struct neigh_node, list);
+
+		list_del(list_pos);
+		kfree(neigh_node);
+	}
+
+	frag_list_free(&orig_node->frag_list);
+	hna_global_del_orig(bat_priv, orig_node, "originator timed out");
+
+	kfree(orig_node->bcast_own);
+	kfree(orig_node->bcast_own_sum);
+	kfree(orig_node);
+}
+
+void originator_free(struct bat_priv *bat_priv)
+{
+	unsigned long flags;
+
+	if (!bat_priv->orig_hash)
+		return;
+
+	cancel_delayed_work_sync(&bat_priv->orig_work);
+
+	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+	hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
+	bat_priv->orig_hash = NULL;
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+}
+
+/* this function finds or creates an originator entry for the given
+ * address if it does not exits */
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
+{
+	struct orig_node *orig_node;
+	struct hashtable_t *swaphash;
+	int size;
+
+	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
+
+	if (orig_node)
+		return orig_node;
+
+	bat_dbg(DBG_BATMAN, bat_priv,
+		"Creating new originator: %pM\n", addr);
+
+	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
+	if (!orig_node)
+		return NULL;
+
+	INIT_LIST_HEAD(&orig_node->neigh_list);
+
+	memcpy(orig_node->orig, addr, ETH_ALEN);
+	orig_node->router = NULL;
+	orig_node->hna_buff = NULL;
+	orig_node->bcast_seqno_reset = jiffies - 1
+		- msecs_to_jiffies(RESET_PROTECTION_MS);
+	orig_node->batman_seqno_reset = jiffies - 1
+		- msecs_to_jiffies(RESET_PROTECTION_MS);
+
+	size = bat_priv->num_ifaces * sizeof(TYPE_OF_WORD) * NUM_WORDS;
+
+	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
+	if (!orig_node->bcast_own)
+		goto free_orig_node;
+
+	size = bat_priv->num_ifaces * sizeof(uint8_t);
+	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+
+	INIT_LIST_HEAD(&orig_node->frag_list);
+	orig_node->last_frag_packet = 0;
+
+	if (!orig_node->bcast_own_sum)
+		goto free_bcast_own;
+
+	if (hash_add(bat_priv->orig_hash, orig_node) < 0)
+		goto free_bcast_own_sum;
+
+	if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
+		swaphash = hash_resize(bat_priv->orig_hash,
+				       bat_priv->orig_hash->size * 2);
+
+		if (!swaphash)
+			bat_dbg(DBG_BATMAN, bat_priv,
+				"Couldn't resize orig hash table\n");
+		else
+			bat_priv->orig_hash = swaphash;
+	}
+
+	return orig_node;
+free_bcast_own_sum:
+	kfree(orig_node->bcast_own_sum);
+free_bcast_own:
+	kfree(orig_node->bcast_own);
+free_orig_node:
+	kfree(orig_node);
+	return NULL;
+}
+
+static bool purge_orig_neighbors(struct bat_priv *bat_priv,
+				 struct orig_node *orig_node,
+				 struct neigh_node **best_neigh_node)
+{
+	struct list_head *list_pos, *list_pos_tmp;
+	struct neigh_node *neigh_node;
+	bool neigh_purged = false;
+
+	*best_neigh_node = NULL;
+
+	/* for all neighbors towards this originator ... */
+	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
+		neigh_node = list_entry(list_pos, struct neigh_node, list);
+
+		if ((time_after(jiffies,
+				neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
+		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
+		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
+
+			if (neigh_node->if_incoming->if_status ==
+							IF_TO_BE_REMOVED)
+				bat_dbg(DBG_BATMAN, bat_priv,
+					"neighbor purge: originator %pM, "
+					"neighbor: %pM, iface: %s\n",
+					orig_node->orig, neigh_node->addr,
+					neigh_node->if_incoming->net_dev->name);
+			else
+				bat_dbg(DBG_BATMAN, bat_priv,
+					"neighbor timeout: originator %pM, "
+					"neighbor: %pM, last_valid: %lu\n",
+					orig_node->orig, neigh_node->addr,
+					(neigh_node->last_valid / HZ));
+
+			neigh_purged = true;
+			list_del(list_pos);
+			kfree(neigh_node);
+		} else {
+			if ((*best_neigh_node == NULL) ||
+			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
+				*best_neigh_node = neigh_node;
+		}
+	}
+	return neigh_purged;
+}
+
+static bool purge_orig_node(struct bat_priv *bat_priv,
+			    struct orig_node *orig_node)
+{
+	struct neigh_node *best_neigh_node;
+
+	if (time_after(jiffies,
+		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
+
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Originator timeout: originator %pM, last_valid %lu\n",
+			orig_node->orig, (orig_node->last_valid / HZ));
+		return true;
+	} else {
+		if (purge_orig_neighbors(bat_priv, orig_node,
+					 &best_neigh_node)) {
+			update_routes(bat_priv, orig_node,
+				      best_neigh_node,
+				      orig_node->hna_buff,
+				      orig_node->hna_buff_len);
+			/* update bonding candidates, we could have lost
+			 * some candidates. */
+			update_bonding_candidates(bat_priv, orig_node);
+		}
+	}
+
+	return false;
+}
+
+static void _purge_orig(struct bat_priv *bat_priv)
+{
+	HASHIT(hashit);
+	struct orig_node *orig_node;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+
+	/* for all origins... */
+	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
+		orig_node = hashit.bucket->data;
+
+		if (purge_orig_node(bat_priv, orig_node)) {
+			hash_remove_bucket(bat_priv->orig_hash, &hashit);
+			free_orig_node(orig_node, bat_priv);
+		}
+
+		if (time_after(jiffies, (orig_node->last_frag_packet +
+					 msecs_to_jiffies(FRAG_TIMEOUT))))
+			frag_list_free(&orig_node->frag_list);
+	}
+
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+
+}
+
+static void purge_orig(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, orig_work);
+
+	_purge_orig(bat_priv);
+	start_purge_timer(bat_priv);
+}
+
+void purge_orig_ref(struct bat_priv *bat_priv)
+{
+	_purge_orig(bat_priv);
+}
+
+int orig_seq_print_text(struct seq_file *seq, void *offset)
+{
+	HASHIT(hashit);
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	struct orig_node *orig_node;
+	struct neigh_node *neigh_node;
+	int batman_count = 0;
+	int last_seen_secs;
+	int last_seen_msecs;
+	unsigned long flags;
+	char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
+
+	if ((!bat_priv->primary_if) ||
+	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
+		if (!bat_priv->primary_if)
+			return seq_printf(seq, "BATMAN mesh %s disabled - "
+				     "please specify interfaces to enable it\n",
+				     net_dev->name);
+
+		return seq_printf(seq, "BATMAN mesh %s "
+				  "disabled - primary interface not active\n",
+				  net_dev->name);
+	}
+
+	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s (%s)]\n",
+		   SOURCE_VERSION, REVISION_VERSION_STR,
+		   bat_priv->primary_if->net_dev->name,
+		   bat_priv->primary_if->addr_str, net_dev->name);
+	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
+		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
+		   "outgoingIF", "Potential nexthops");
+
+	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+
+	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
+
+		orig_node = hashit.bucket->data;
+
+		if (!orig_node->router)
+			continue;
+
+		if (orig_node->router->tq_avg == 0)
+			continue;
+
+		addr_to_string(orig_str, orig_node->orig);
+		addr_to_string(router_str, orig_node->router->addr);
+		last_seen_secs = jiffies_to_msecs(jiffies -
+						orig_node->last_valid) / 1000;
+		last_seen_msecs = jiffies_to_msecs(jiffies -
+						orig_node->last_valid) % 1000;
+
+		seq_printf(seq, "%-17s %4i.%03is (%3i) %17s [%10s]:",
+			   orig_str, last_seen_secs, last_seen_msecs,
+			   orig_node->router->tq_avg, router_str,
+			   orig_node->router->if_incoming->net_dev->name);
+
+		list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
+			addr_to_string(orig_str, neigh_node->addr);
+			seq_printf(seq, " %17s (%3i)", orig_str,
+				   neigh_node->tq_avg);
+		}
+
+		seq_printf(seq, "\n");
+		batman_count++;
+	}
+
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+
+	if ((batman_count == 0))
+		seq_printf(seq, "No batman nodes in range ...\n");
+
+	return 0;
+}
+
+static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
+{
+	void *data_ptr;
+
+	data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS,
+			   GFP_ATOMIC);
+	if (!data_ptr) {
+		pr_err("Can't resize orig: out of memory\n");
+		return -1;
+	}
+
+	memcpy(data_ptr, orig_node->bcast_own,
+	       (max_if_num - 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS);
+	kfree(orig_node->bcast_own);
+	orig_node->bcast_own = data_ptr;
+
+	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+	if (!data_ptr) {
+		pr_err("Can't resize orig: out of memory\n");
+		return -1;
+	}
+
+	memcpy(data_ptr, orig_node->bcast_own_sum,
+	       (max_if_num - 1) * sizeof(uint8_t));
+	kfree(orig_node->bcast_own_sum);
+	orig_node->bcast_own_sum = data_ptr;
+
+	return 0;
+}
+
+int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
+{
+	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct orig_node *orig_node;
+	unsigned long flags;
+	HASHIT(hashit);
+
+	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
+	 * if_num */
+	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+
+	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
+		orig_node = hashit.bucket->data;
+
+		if (orig_node_add_if(orig_node, max_if_num) == -1)
+			goto err;
+	}
+
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+	return -ENOMEM;
+}
+
+static int orig_node_del_if(struct orig_node *orig_node,
+			    int max_if_num, int del_if_num)
+{
+	void *data_ptr = NULL;
+	int chunk_size;
+
+	/* last interface was removed */
+	if (max_if_num == 0)
+		goto free_bcast_own;
+
+	chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS;
+	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
+	if (!data_ptr) {
+		pr_err("Can't resize orig: out of memory\n");
+		return -1;
+	}
+
+	/* copy first part */
+	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
+
+	/* copy second part */
- 	memcpy(data_ptr,
++	memcpy(data_ptr + del_if_num * chunk_size,
+	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
+	       (max_if_num - del_if_num) * chunk_size);
+
+free_bcast_own:
+	kfree(orig_node->bcast_own);
+	orig_node->bcast_own = data_ptr;
+
+	if (max_if_num == 0)
+		goto free_own_sum;
+
+	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+	if (!data_ptr) {
+		pr_err("Can't resize orig: out of memory\n");
+		return -1;
+	}
+
+	memcpy(data_ptr, orig_node->bcast_own_sum,
+	       del_if_num * sizeof(uint8_t));
+
- 	memcpy(data_ptr,
++	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
+	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
+	       (max_if_num - del_if_num) * sizeof(uint8_t));
+
+free_own_sum:
+	kfree(orig_node->bcast_own_sum);
+	orig_node->bcast_own_sum = data_ptr;
+
+	return 0;
+}
+
+int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
+{
+	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct batman_if *batman_if_tmp;
+	struct orig_node *orig_node;
+	unsigned long flags;
+	HASHIT(hashit);
+	int ret;
+
+	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
+	 * if_num */
+	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+
+	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
+		orig_node = hashit.bucket->data;
+
+		ret = orig_node_del_if(orig_node, max_if_num,
+				       batman_if->if_num);
+
+		if (ret == -1)
+			goto err;
+	}
+
+	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
+	rcu_read_lock();
+	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
+		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
+			continue;
+
+		if (batman_if == batman_if_tmp)
+			continue;
+
+		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+			continue;
+
+		if (batman_if_tmp->if_num > batman_if->if_num)
+			batman_if_tmp->if_num--;
+	}
+	rcu_read_unlock();
+
+	batman_if->if_num = -1;
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+	return -ENOMEM;
+}
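The two `++` lines in orig_node_del_if() above fix the second half of the copy: it previously landed at the start of the new buffer, overwriting the part already copied, instead of just past it. The same remove-one-chunk pattern in isolation (standalone sketch; shrink_chunk_array() is a hypothetical name, not driver code, and it assumes del < count):

#include <stdlib.h>
#include <string.h>

/* Build a new array with chunk `del` removed from `old`, which holds
 * `count` fixed-size chunks. As in the fixed orig_node_del_if(), the
 * second memcpy() must start at dst + del * chunk_size, not at dst. */
static void *shrink_chunk_array(const void *old, size_t count,
				size_t del, size_t chunk_size)
{
	unsigned char *dst = malloc((count - 1) * chunk_size);
	const unsigned char *src = old;

	if (!dst)
		return NULL;

	/* first part: chunks [0, del) */
	memcpy(dst, src, del * chunk_size);

	/* second part: chunks (del, count), shifted down one slot */
	memcpy(dst + del * chunk_size,
	       src + (del + 1) * chunk_size,
	       (count - 1 - del) * chunk_size);

	return dst;
}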
diff --combined drivers/staging/batman-adv/send.c
index 5d57ef5,0000000..9032861
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@@ -1,577 -1,0 +1,580 @@@
+/*
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "send.h"
+#include "routing.h"
+#include "translation-table.h"
+#include "soft-interface.h"
+#include "hard-interface.h"
+#include "types.h"
+#include "vis.h"
+#include "aggregation.h"
+
+
+static void send_outstanding_bcast_packet(struct work_struct *work);
+
+/* apply hop penalty for a normal link */
+static uint8_t hop_penalty(const uint8_t tq)
+{
+	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
+}
+
+/* when do we schedule our own packet to be sent */
+static unsigned long own_send_time(struct bat_priv *bat_priv)
+{
+	return jiffies + msecs_to_jiffies(
+		   atomic_read(&bat_priv->orig_interval) -
+		   JITTER + (random32() % 2*JITTER));
+}
+
+/* when do we schedule a forwarded packet to be sent */
+static unsigned long forward_send_time(struct bat_priv *bat_priv)
+{
+	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
+}
+
+/* send out an already prepared packet to the given address via the
+ * specified batman interface */
+int send_skb_packet(struct sk_buff *skb,
+		    struct batman_if *batman_if,
+		    uint8_t *dst_addr)
+{
+	struct ethhdr *ethhdr;
+
+	if (batman_if->if_status != IF_ACTIVE)
+		goto send_skb_err;
+
+	if (unlikely(!batman_if->net_dev))
+		goto send_skb_err;
+
+	if (!(batman_if->net_dev->flags & IFF_UP)) {
+		pr_warning("Interface %s is not up - can't send packet via "
+			   "that interface!\n", batman_if->net_dev->name);
+		goto send_skb_err;
+	}
+
+	/* push to the ethernet header. */
+	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
+		goto send_skb_err;
+
+	skb_reset_mac_header(skb);
+
+	ethhdr = (struct ethhdr *) skb_mac_header(skb);
+	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
+	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+
+	skb_set_network_header(skb, ETH_HLEN);
+	skb->priority = TC_PRIO_CONTROL;
+	skb->protocol = __constant_htons(ETH_P_BATMAN);
+
+	skb->dev = batman_if->net_dev;
+
+	/* dev_queue_xmit() returns a negative result on error. However on
+	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
+	 * (which is > 0). This will not be treated as an error. */
+
+	return dev_queue_xmit(skb);
+send_skb_err:
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
+}
+
+/* Send a packet to a given interface */
+static void send_packet_to_if(struct forw_packet *forw_packet,
+			      struct batman_if *batman_if)
+{
+	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	char *fwd_str;
+	uint8_t packet_num;
+	int16_t buff_pos;
+	struct batman_packet *batman_packet;
+	struct sk_buff *skb;
+
+	if (batman_if->if_status != IF_ACTIVE)
+		return;
+
+	packet_num = 0;
+	buff_pos = 0;
+	batman_packet = (struct batman_packet *)forw_packet->skb->data;
+
+	/* adjust all flags and log packets */
+	while (aggregated_packet(buff_pos,
+				 forw_packet->packet_len,
+				 batman_packet->num_hna)) {
+
+		/* we might have aggregated direct link packets with an
+		 * ordinary base packet */
+		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
+		    (forw_packet->if_incoming == batman_if))
+			batman_packet->flags |= DIRECTLINK;
+		else
+			batman_packet->flags &= ~DIRECTLINK;
+
+		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
+							    "Sending own" :
+							    "Forwarding"));
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
+			" IDF %s) on interface %s [%s]\n",
+			fwd_str, (packet_num > 0 ? "aggregated " : ""),
+			batman_packet->orig, ntohl(batman_packet->seqno),
+			batman_packet->tq, batman_packet->ttl,
+			(batman_packet->flags & DIRECTLINK ?
+			 "on" : "off"),
+			batman_if->net_dev->name, batman_if->addr_str);
+
+		buff_pos += sizeof(struct batman_packet) +
+			(batman_packet->num_hna * ETH_ALEN);
+		packet_num++;
+		batman_packet = (struct batman_packet *)
+			(forw_packet->skb->data + buff_pos);
+	}
+
+	/* create clone because function is called more than once */
+	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
+	if (skb)
+		send_skb_packet(skb, batman_if, broadcast_addr);
+}
+
+/* send a batman packet */
+static void send_packet(struct forw_packet *forw_packet)
+{
+	struct batman_if *batman_if;
- 	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
- 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
++	struct net_device *soft_iface;
++	struct bat_priv *bat_priv;
+	struct batman_packet *batman_packet =
+		(struct batman_packet *)(forw_packet->skb->data);
+	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
+
+	if (!forw_packet->if_incoming) {
+		pr_err("Error - can't forward packet: incoming iface not "
+		       "specified\n");
+		return;
+	}
+
++	soft_iface = forw_packet->if_incoming->soft_iface;
++	bat_priv = netdev_priv(soft_iface);
++
+	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
+		return;
+
+	/* multihomed peer assumed */
+	/* non-primary OGMs are only broadcasted on their interface */
+	if ((directlink && (batman_packet->ttl == 1)) ||
+	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
+
+		/* FIXME: what about aggregated packets ? */
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"%s packet (originator %pM, seqno %d, TTL %d) "
+			"on interface %s [%s]\n",
"Sending own" : "Forwarding"), + batman_packet->orig, ntohl(batman_packet->seqno), + batman_packet->ttl, + forw_packet->if_incoming->net_dev->name, + forw_packet->if_incoming->addr_str); + + /* skb is only used once and than forw_packet is free'd */ + send_skb_packet(forw_packet->skb, forw_packet->if_incoming, + broadcast_addr); + forw_packet->skb = NULL; + + return; + } + + /* broadcast on every interface */ + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if (batman_if->soft_iface != soft_iface) + continue; + + send_packet_to_if(forw_packet, batman_if); + } + rcu_read_unlock(); +} + +static void rebuild_batman_packet(struct bat_priv *bat_priv, + struct batman_if *batman_if) +{ + int new_len; + unsigned char *new_buff; + struct batman_packet *batman_packet; + + new_len = sizeof(struct batman_packet) + + (bat_priv->num_local_hna * ETH_ALEN); + new_buff = kmalloc(new_len, GFP_ATOMIC); + + /* keep old buffer if kmalloc should fail */ + if (new_buff) { + memcpy(new_buff, batman_if->packet_buff, + sizeof(struct batman_packet)); + batman_packet = (struct batman_packet *)new_buff; + + batman_packet->num_hna = hna_local_fill_buffer(bat_priv, + new_buff + sizeof(struct batman_packet), + new_len - sizeof(struct batman_packet)); + + kfree(batman_if->packet_buff); + batman_if->packet_buff = new_buff; + batman_if->packet_len = new_len; + } +} + +void schedule_own_packet(struct batman_if *batman_if) +{ + struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); + unsigned long send_time; + struct batman_packet *batman_packet; + int vis_server; + + if ((batman_if->if_status == IF_NOT_IN_USE) || + (batman_if->if_status == IF_TO_BE_REMOVED)) + return; + + vis_server = atomic_read(&bat_priv->vis_mode); + + /** + * the interface gets activated here to avoid race conditions between + * the moment of activating the interface in + * hardif_activate_interface() where the originator mac is set and + * outdated packets (especially uninitialized mac addresses) in the + * packet queue + */ + if (batman_if->if_status == IF_TO_BE_ACTIVATED) + batman_if->if_status = IF_ACTIVE; + + /* if local hna has changed and interface is a primary interface */ + if ((atomic_read(&bat_priv->hna_local_changed)) && + (batman_if == bat_priv->primary_if)) + rebuild_batman_packet(bat_priv, batman_if); + + /** + * NOTE: packet_buff might just have been re-allocated in + * rebuild_batman_packet() + */ + batman_packet = (struct batman_packet *)batman_if->packet_buff; + + /* change sequence number to network order */ + batman_packet->seqno = + htonl((uint32_t)atomic_read(&batman_if->seqno)); + + if (vis_server == VIS_TYPE_SERVER_SYNC) + batman_packet->flags |= VIS_SERVER; + else + batman_packet->flags &= ~VIS_SERVER; + + atomic_inc(&batman_if->seqno); + + slide_own_bcast_window(batman_if); + send_time = own_send_time(bat_priv); + add_bat_packet_to_list(bat_priv, + batman_if->packet_buff, + batman_if->packet_len, + batman_if, 1, send_time); +} + +void schedule_forward_packet(struct orig_node *orig_node, + struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + uint8_t directlink, int hna_buff_len, + struct batman_if *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + unsigned char in_tq, in_ttl, tq_avg = 0; + unsigned long send_time; + + if (batman_packet->ttl <= 1) { + bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); + return; + } + + in_tq = batman_packet->tq; + in_ttl = batman_packet->ttl; + + batman_packet->ttl--; + memcpy(batman_packet->prev_sender, 
ethhdr->h_source, ETH_ALEN); + + /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast + * of our best tq value */ + if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { + + /* rebroadcast ogm of best ranking neighbor as is */ + if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) { + batman_packet->tq = orig_node->router->tq_avg; + + if (orig_node->router->last_ttl) + batman_packet->ttl = orig_node->router->last_ttl + - 1; + } + + tq_avg = orig_node->router->tq_avg; + } + + /* apply hop penalty */ + batman_packet->tq = hop_penalty(batman_packet->tq); + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: tq_orig: %i, tq_avg: %i, " + "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n", + in_tq, tq_avg, batman_packet->tq, in_ttl - 1, + batman_packet->ttl); + + batman_packet->seqno = htonl(batman_packet->seqno); + + /* switch of primaries first hop flag when forwarding */ + batman_packet->flags &= ~PRIMARIES_FIRST_HOP; + if (directlink) + batman_packet->flags |= DIRECTLINK; + else + batman_packet->flags &= ~DIRECTLINK; + + send_time = forward_send_time(bat_priv); + add_bat_packet_to_list(bat_priv, + (unsigned char *)batman_packet, + sizeof(struct batman_packet) + hna_buff_len, + if_incoming, 0, send_time); +} + +static void forw_packet_free(struct forw_packet *forw_packet) +{ + if (forw_packet->skb) + kfree_skb(forw_packet->skb); + kfree(forw_packet); +} + +static void _add_bcast_packet_to_list(struct bat_priv *bat_priv, + struct forw_packet *forw_packet, + unsigned long send_time) +{ + unsigned long flags; + INIT_HLIST_NODE(&forw_packet->list); + + /* add new packet to packet list */ + spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags); + hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list); + spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags); + + /* start timer for this packet */ + INIT_DELAYED_WORK(&forw_packet->delayed_work, + send_outstanding_bcast_packet); + queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work, + send_time); +} + +#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) +/* add a broadcast packet to the queue and setup timers. broadcast packets + * are sent multiple times to increase probability for beeing received. + * + * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on + * errors. + * + * The skb is not consumed, so the caller should make sure that the + * skb is freed. */ +int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb) +{ + struct forw_packet *forw_packet; + struct bcast_packet *bcast_packet; + + if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) { + bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n"); + goto out; + } + + if (!bat_priv->primary_if) + goto out; + + forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); + + if (!forw_packet) + goto out_and_inc; + + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) + goto packet_free; + + /* as we have a copy now, it is safe to decrease the TTL */ + bcast_packet = (struct bcast_packet *)skb->data; + bcast_packet->ttl--; + + skb_reset_mac_header(skb); + + forw_packet->skb = skb; + forw_packet->if_incoming = bat_priv->primary_if; + + /* how often did we send the bcast packet ? 
+	forw_packet->num_packets = 0;
+
+	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
+	return NETDEV_TX_OK;
+
+packet_free:
+	kfree(forw_packet);
+out_and_inc:
+	atomic_inc(&bat_priv->bcast_queue_left);
+out:
+	return NETDEV_TX_BUSY;
+}
+
+static void send_outstanding_bcast_packet(struct work_struct *work)
+{
+	struct batman_if *batman_if;
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct forw_packet *forw_packet =
+		container_of(delayed_work, struct forw_packet, delayed_work);
+	unsigned long flags;
+	struct sk_buff *skb1;
+	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
+	struct bat_priv *bat_priv = netdev_priv(soft_iface);
+
+	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+	hlist_del(&forw_packet->list);
+	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+
+	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+		goto out;
+
+	/* rebroadcast packet */
+	rcu_read_lock();
+	list_for_each_entry_rcu(batman_if, &if_list, list) {
+		if (batman_if->soft_iface != soft_iface)
+			continue;
+
+		/* send a copy of the saved skb */
+		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
+		if (skb1)
+			send_skb_packet(skb1, batman_if, broadcast_addr);
+	}
+	rcu_read_unlock();
+
+	forw_packet->num_packets++;
+
+	/* if we still have some more bcasts to send */
+	if (forw_packet->num_packets < 3) {
+		_add_bcast_packet_to_list(bat_priv, forw_packet,
+					  ((5 * HZ) / 1000));
+		return;
+	}
+
+out:
+	forw_packet_free(forw_packet);
+	atomic_inc(&bat_priv->bcast_queue_left);
+}
+
+void send_outstanding_bat_packet(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct forw_packet *forw_packet =
+		container_of(delayed_work, struct forw_packet, delayed_work);
+	unsigned long flags;
+	struct bat_priv *bat_priv;
+
+	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
+	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+	hlist_del(&forw_packet->list);
+	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+
+	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+		goto out;
+
+	send_packet(forw_packet);
+
+	/**
+	 * we have to have at least one packet in the queue
+	 * to determine the queues wake up time unless we are
+	 * shutting down
+	 */
+	if (forw_packet->own)
+		schedule_own_packet(forw_packet->if_incoming);
+
+out:
+	/* don't count own packet */
+	if (!forw_packet->own)
+		atomic_inc(&bat_priv->batman_queue_left);
+
+	forw_packet_free(forw_packet);
+}
+
+void purge_outstanding_packets(struct bat_priv *bat_priv,
+			       struct batman_if *batman_if)
+{
+	struct forw_packet *forw_packet;
+	struct hlist_node *tmp_node, *safe_tmp_node;
+	unsigned long flags;
+
+	if (batman_if)
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"purge_outstanding_packets(): %s\n",
+			batman_if->net_dev->name);
+	else
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"purge_outstanding_packets()\n");
+
+	/* free bcast list */
+	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+				  &bat_priv->forw_bcast_list, list) {
+
+		/**
+		 * if purge_outstanding_packets() was called with an argmument
+		 * we delete only packets belonging to the given interface
+		 */
+		if ((batman_if) &&
+		    (forw_packet->if_incoming != batman_if))
+			continue;
+
+		spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+
+		/**
+		 * send_outstanding_bcast_packet() will lock the list to
+		 * delete the item from the list
+		 */
+		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+	}
+	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+
+	/* free batman packet list */
+	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+				  &bat_priv->forw_bat_list, list) {
+
+		/**
+		 * if purge_outstanding_packets() was called with an argmument
+		 * we delete only packets belonging to the given interface
+		 */
+		if ((batman_if) &&
+		    (forw_packet->if_incoming != batman_if))
+			continue;
+
+		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+
+		/**
+		 * send_outstanding_bat_packet() will lock the list to
+		 * delete the item from the list
+		 */
+		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+	}
+	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+}
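send.c's atomic_dec_not_zero() (defined above as atomic_add_unless((v), -1, 0)) is what lets add_bcast_packet_to_list() claim a queue slot without ever pushing the counter below zero, with the out_and_inc: path handing the slot back on failure. A userspace C11 analogue of that claim/release pair (sketch only; the kernel primitive already provides this behaviour):

#include <stdatomic.h>
#include <stdbool.h>

/* Claim one slot: decrement *left unless it is already 0.
 * Mirrors atomic_add_unless((v), -1, 0) from send.c. */
static bool dec_not_zero(atomic_int *left)
{
	int old = atomic_load(left);

	while (old != 0) {
		if (atomic_compare_exchange_weak(left, &old, old - 1))
			return true;	/* slot claimed */
		/* old was reloaded by the failed CAS; retry */
	}
	return false;			/* queue full */
}

/* Release a slot again, as the out_and_inc: error path does. */
static void release_slot(atomic_int *left)
{
	atomic_fetch_add(left, 1);
}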
diff --combined drivers/staging/batman-adv/sysfs-class-net-mesh
index 5aa1912,b4cdb60..b4cdb60
--- a/drivers/staging/batman-adv/sysfs-class-net-mesh
+++ b/drivers/staging/batman-adv/sysfs-class-net-mesh
@@@ -14,6 -14,14 +14,14 @@@ Description
                  mesh will be sent using multiple interfaces at the
                  same time (if available).
  
+ What:           /sys/class/net/<mesh_iface>/mesh/fragmentation
+ Date:           October 2010
+ Contact:        Andreas Langer <an.langer@gmx.de>
+ Description:
+                 Indicates whether the data traffic going through the
+                 mesh will be fragmented or silently discarded if the
+                 packet size exceeds the outgoing interface MTU.
+ 
  What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
  Date:           May 2010
  Contact:        Marek Lindner <lindner_marek@yahoo.de>
diff --combined drivers/staging/batman-adv/translation-table.c
index 12b2325,0000000..681ccbd
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/translation-table.c
+++ b/drivers/staging/batman-adv/translation-table.c
@@@ -1,513 -1,0 +1,518 @@@
+/*
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "translation-table.h"
+#include "soft-interface.h"
+#include "types.h"
+#include "hash.h"
+
+static void hna_local_purge(struct work_struct *work);
+static void _hna_global_del_orig(struct bat_priv *bat_priv,
+				 struct hna_global_entry *hna_global_entry,
+				 char *message);
+
+static void hna_local_start_timer(struct bat_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
+}
+
+int hna_local_init(struct bat_priv *bat_priv)
+{
+	if (bat_priv->hna_local_hash)
+		return 1;
+
+	bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);
+
+	if (!bat_priv->hna_local_hash)
+		return 0;
+
+	atomic_set(&bat_priv->hna_local_changed, 0);
+	hna_local_start_timer(bat_priv);
+
+	return 1;
+}
+
+void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
+{
+	struct bat_priv *bat_priv = netdev_priv(soft_iface);
+	struct hna_local_entry *hna_local_entry;
+	struct hna_global_entry *hna_global_entry;
+	struct hashtable_t *swaphash;
+	unsigned long flags;
++	int required_bytes;
+
+	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+	hna_local_entry =
+		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
+						     addr));
+	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+
+	if (hna_local_entry) {
+		hna_local_entry->last_seen = jiffies;
+		return;
+	}
+
+	/* only announce as many hosts as possible in the batman-packet and
+	   space in batman_packet->num_hna That also should give a limit to
+	   MAC-flooding. */
- 	if ((bat_priv->num_local_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN)
- 	     / ETH_ALEN) ||
++	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
++	required_bytes += BAT_PACKET_LEN;
++
++	if ((required_bytes > ETH_DATA_LEN) ||
++	    (atomic_read(&bat_priv->aggregation_enabled) &&
++	     required_bytes > MAX_AGGREGATION_BYTES) ||
+	    (bat_priv->num_local_hna + 1 > 255)) {
+		bat_dbg(DBG_ROUTES, bat_priv,
+			"Can't add new local hna entry (%pM): "
+			"number of local hna entries exceeds packet size\n",
+			addr);
+		return;
+	}
+
+	bat_dbg(DBG_ROUTES, bat_priv,
+		"Creating new local hna entry: %pM\n", addr);
+
+	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
+	if (!hna_local_entry)
+		return;
+
+	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
+	hna_local_entry->last_seen = jiffies;
+
+	/* the batman interface mac address should never be purged */
+	if (compare_orig(addr, soft_iface->dev_addr))
+		hna_local_entry->never_purge = 1;
+	else
+		hna_local_entry->never_purge = 0;
+
+	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+
+	hash_add(bat_priv->hna_local_hash, hna_local_entry);
+	bat_priv->num_local_hna++;
+	atomic_set(&bat_priv->hna_local_changed, 1);
+
+	if (bat_priv->hna_local_hash->elements * 4 >
+	    bat_priv->hna_local_hash->size) {
+		swaphash = hash_resize(bat_priv->hna_local_hash,
+				       bat_priv->hna_local_hash->size * 2);
+
+		if (!swaphash)
+			pr_err("Couldn't resize local hna hash table\n");
+		else
+			bat_priv->hna_local_hash = swaphash;
+	}
+
+	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+
+	/* remove address from global hash if present */
+	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+
+	hna_global_entry = ((struct hna_global_entry *)
+				hash_find(bat_priv->hna_global_hash, addr));
+
+	if (hna_global_entry)
+		_hna_global_del_orig(bat_priv, hna_global_entry,
+				     "local hna received");
+
+	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+}
+
+int hna_local_fill_buffer(struct bat_priv *bat_priv,
+			  unsigned char *buff, int buff_len)
+{
+	struct hna_local_entry *hna_local_entry;
+	HASHIT(hashit);
+	int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+
+	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
+
+		if (buff_len < (i + 1) * ETH_ALEN)
+			break;
+
+		hna_local_entry = hashit.bucket->data;
+		memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);
+
+		i++;
+	}
+
+	/* if we did not get all new local hnas see you next time ;-) */
+	if (i == bat_priv->num_local_hna)
+		atomic_set(&bat_priv->hna_local_changed, 0);
+
+	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+	return i;
+}
+
+int hna_local_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	struct hna_local_entry *hna_local_entry;
+	HASHIT(hashit);
+	HASHIT(hashit_count);
+	unsigned long flags;
+	size_t buf_size, pos;
+	char *buff;
+
+	if (!bat_priv->primary_if) {
+		return seq_printf(seq, "BATMAN mesh %s disabled - "
+			       "please specify interfaces to enable it\n",
+			       net_dev->name);
+	}
+
+	seq_printf(seq, "Locally retrieved addresses (from %s) "
+		   "announced via HNA:\n",
+		   net_dev->name);
+
+	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+
+	buf_size = 1;
+	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
+	while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
+		buf_size += 21;
+
+	buff = kmalloc(buf_size, GFP_ATOMIC);
+	if (!buff) {
+		spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+		return -ENOMEM;
+	}
+	buff[0] = '\0';
+	pos = 0;
+
+	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
+		hna_local_entry = hashit.bucket->data;
+
+		pos += snprintf(buff + pos, 22, " * %pM\n",
+				hna_local_entry->addr);
+	}
+
+	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+
+	seq_printf(seq, "%s", buff);
+	kfree(buff);
+	return 0;
+}
+
+static void _hna_local_del(void *data, void *arg)
+{
+	struct bat_priv *bat_priv = (struct bat_priv *)arg;
+
+	kfree(data);
+	bat_priv->num_local_hna--;
+	atomic_set(&bat_priv->hna_local_changed, 1);
+}
+
+static void hna_local_del(struct bat_priv *bat_priv,
+			  struct hna_local_entry *hna_local_entry,
+			  char *message)
+{
+	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
+		hna_local_entry->addr, message);
+
+	hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
+	_hna_local_del(hna_local_entry, bat_priv);
+}
+
+void hna_local_remove(struct bat_priv *bat_priv,
+		      uint8_t *addr, char *message)
+{
+	struct hna_local_entry *hna_local_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+
+	hna_local_entry = (struct hna_local_entry *)
+		hash_find(bat_priv->hna_local_hash, addr);
+	if (hna_local_entry)
+		hna_local_del(bat_priv, hna_local_entry, message);
+
+	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+}
+
+static void hna_local_purge(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, hna_work);
+	struct hna_local_entry *hna_local_entry;
+	HASHIT(hashit);
+	unsigned long flags;
+	unsigned long timeout;
+
+	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+
+	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
+		hna_local_entry = hashit.bucket->data;
+
+		timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
+
+		if ((!hna_local_entry->never_purge) &&
+		    time_after(jiffies, timeout))
+			hna_local_del(bat_priv, hna_local_entry,
+				      "address timed out");
+	}
+
+	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+	hna_local_start_timer(bat_priv);
+}
+
+void hna_local_free(struct bat_priv *bat_priv)
+{
+	if (!bat_priv->hna_local_hash)
+		return;
+
+	cancel_delayed_work_sync(&bat_priv->hna_work);
+	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
+	bat_priv->hna_local_hash = NULL;
+}
+
+int hna_global_init(struct bat_priv *bat_priv)
+{
+	if (bat_priv->hna_global_hash)
+		return 1;
+
+	bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);
+
+	if (!bat_priv->hna_global_hash)
+		return 0;
+
+	return 1;
+}
+
+void hna_global_add_orig(struct bat_priv *bat_priv,
+			 struct orig_node *orig_node,
+			 unsigned char *hna_buff, int hna_buff_len)
+{
+	struct hna_global_entry *hna_global_entry;
+	struct hna_local_entry *hna_local_entry;
+	struct hashtable_t *swaphash;
+	int hna_buff_count = 0;
+	unsigned long flags;
+	unsigned char *hna_ptr;
+
+	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
+		spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+
+		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
+		hna_global_entry = (struct hna_global_entry *)
+			hash_find(bat_priv->hna_global_hash, hna_ptr);
+
+		if (!hna_global_entry) {
+			spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
+					       flags);
+
+			hna_global_entry =
+				kmalloc(sizeof(struct hna_global_entry),
+					GFP_ATOMIC);
+
+			if (!hna_global_entry)
+				break;
+
+			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
+
+			bat_dbg(DBG_ROUTES, bat_priv,
+				"Creating new global hna entry: "
+				"%pM (via %pM)\n",
+				hna_global_entry->addr, orig_node->orig);
+
+			spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+			hash_add(bat_priv->hna_global_hash, hna_global_entry);
+
+		}
+
+		hna_global_entry->orig_node = orig_node;
+		spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+
+		/* remove address from local hash if present */
+		spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+
+		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
+		hna_local_entry = (struct hna_local_entry *)
+			hash_find(bat_priv->hna_local_hash, hna_ptr);
+
+		if (hna_local_entry)
+			hna_local_del(bat_priv, hna_local_entry,
+				      "global hna received");
+
+		spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+
+		hna_buff_count++;
+	}
+
+	/* initialize, and overwrite if malloc succeeds */
+	orig_node->hna_buff = NULL;
+	orig_node->hna_buff_len = 0;
+
+	if (hna_buff_len > 0) {
+		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
+		if (orig_node->hna_buff) {
+			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
+			orig_node->hna_buff_len = hna_buff_len;
+		}
+	}
+
+	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+
+	if (bat_priv->hna_global_hash->elements * 4 >
+	    bat_priv->hna_global_hash->size) {
+		swaphash = hash_resize(bat_priv->hna_global_hash,
+				       bat_priv->hna_global_hash->size * 2);
+
+		if (!swaphash)
+			pr_err("Couldn't resize global hna hash table\n");
+		else
+			bat_priv->hna_global_hash = swaphash;
+	}
+
+	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+}
+
+int hna_global_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	struct hna_global_entry *hna_global_entry;
+	HASHIT(hashit);
+	HASHIT(hashit_count);
+	unsigned long flags;
+	size_t buf_size, pos;
+	char *buff;
+
+	if (!bat_priv->primary_if) {
+		return seq_printf(seq, "BATMAN mesh %s disabled - "
+			       "please specify interfaces to enable it\n",
+			       net_dev->name);
+	}
+
+	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
+		   net_dev->name);
+
+	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+
+	buf_size = 1;
+	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
+	while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
+		buf_size += 43;
+
+	buff = kmalloc(buf_size, GFP_ATOMIC);
+	if (!buff) {
+		spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+		return -ENOMEM;
+	}
+	buff[0] = '\0';
+	pos = 0;
+
+	while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
+		hna_global_entry = hashit.bucket->data;
+
+		pos += snprintf(buff + pos, 44,
+				" * %pM via %pM\n", hna_global_entry->addr,
+				hna_global_entry->orig_node->orig);
+	}
+
+	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+
+	seq_printf(seq, "%s", buff);
+	kfree(buff);
+	return 0;
+}
+
+static void _hna_global_del_orig(struct bat_priv *bat_priv,
+				 struct hna_global_entry *hna_global_entry,
+				 char *message)
+{
+	bat_dbg(DBG_ROUTES, bat_priv,
+		"Deleting global hna entry %pM (via %pM): %s\n",
+		hna_global_entry->addr, hna_global_entry->orig_node->orig,
+		message);
+
+	hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
+	kfree(hna_global_entry);
+}
+
+void hna_global_del_orig(struct bat_priv *bat_priv,
+			 struct orig_node *orig_node, char *message)
+{
+	struct hna_global_entry *hna_global_entry;
+	int hna_buff_count = 0;
+	unsigned long flags;
+	unsigned char *hna_ptr;
+
+	if (orig_node->hna_buff_len == 0)
+		return;
+
+	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+
+	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
+		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
+		hna_global_entry = (struct hna_global_entry *)
+			hash_find(bat_priv->hna_global_hash, hna_ptr);
+
+		if ((hna_global_entry) &&
+		    (hna_global_entry->orig_node == orig_node))
+			_hna_global_del_orig(bat_priv, hna_global_entry,
+					     message);
+
+		hna_buff_count++;
+	}
+
+	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+
+	orig_node->hna_buff_len = 0;
+	kfree(orig_node->hna_buff);
+	orig_node->hna_buff = NULL;
+}
+
+static void hna_global_del(void *data, void *arg)
+{
+	kfree(data);
+}
+
+void hna_global_free(struct bat_priv *bat_priv)
+{
+	if (!bat_priv->hna_global_hash)
+		return;
+
+	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
+	bat_priv->hna_global_hash = NULL;
+}
+
+struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
+{
+	struct hna_global_entry *hna_global_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+	hna_global_entry = (struct hna_global_entry *)
+		hash_find(bat_priv->hna_global_hash, addr);
+	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+
+	if (!hna_global_entry)
+		return NULL;
+
+	return hna_global_entry->orig_node;
+}
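For reference, the reworked admission check in hna_local_add() above reduces to the arithmetic below: a new entry is accepted only while all announced addresses still fit into one frame, and into one aggregated packet when aggregation is enabled. Standalone sketch; the BAT_PACKET_LEN value here is an assumption of mine, the driver derives it from its packet headers:

#include <stdbool.h>

#define ETH_ALEN		6
#define ETH_DATA_LEN		1500
#define BAT_PACKET_LEN		22	/* assumed OGM header size */
#define MAX_AGGREGATION_BYTES	512

/* Mirrors the new check: would announcing one more HNA entry still
 * fit into a single (possibly aggregated) batman packet? */
static bool hna_entry_fits(int num_local_hna, bool aggregation_enabled)
{
	int required_bytes = (num_local_hna + 1) * ETH_ALEN + BAT_PACKET_LEN;

	if (required_bytes > ETH_DATA_LEN)
		return false;
	if (aggregation_enabled && required_bytes > MAX_AGGREGATION_BYTES)
		return false;
	if (num_local_hna + 1 > 255)	/* num_hna is a single byte */
		return false;
	return true;
}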