The following commit has been merged in the linux branch:
commit 115cecbe78e852c8ee481c9bfdd1413d3b98ee55
Merge: 137f96870c4736bb7a9a27b1851ab9341f6c6758 73d9024865a95e4bbbce2c59b645236326262ced
Author: Sven Eckelmann <sven.eckelmann@gmx.de>
Date:   Thu Nov 4 22:27:57 2010 +0100
Merge remote branch 'origin/next' into linux
Conflicts:
	drivers/staging/batman-adv/bat_printk.c
	drivers/staging/batman-adv/compat.h
diff --combined drivers/staging/batman-adv/hard-interface.c index 80cfa86,0000000..b68a7e5 mode 100644,000000..100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@@ -1,635 -1,0 +1,640 @@@ +/* + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "hard-interface.h" +#include "soft-interface.h" +#include "send.h" +#include "translation-table.h" +#include "routing.h" +#include "bat_sysfs.h" +#include "originator.h" +#include "hash.h" + +#include <linux/if_arp.h> + +#define MIN(x, y) ((x) < (y) ? (x) : (y)) + +/* protect update critical side of if_list - but not the content */ +static DEFINE_SPINLOCK(if_list_lock); + +struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev) +{ + struct batman_if *batman_if; + + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if (batman_if->net_dev == net_dev) + goto out; + } + + batman_if = NULL; + +out: + if (batman_if) + hardif_hold(batman_if); + + rcu_read_unlock(); + return batman_if; +} + +static int is_valid_iface(struct net_device *net_dev) +{ + if (net_dev->flags & IFF_LOOPBACK) + return 0; + + if (net_dev->type != ARPHRD_ETHER) + return 0; + + if (net_dev->addr_len != ETH_ALEN) + return 0; + + /* no batman over batman */ +#ifdef HAVE_NET_DEVICE_OPS + if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) + return 0; +#else + if (net_dev->hard_start_xmit == interface_tx) + return 0; +#endif + + /* Device is being bridged */ + /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) + return 0; */ + + return 1; +} + +static struct batman_if *get_active_batman_if(struct net_device *soft_iface) +{ + struct batman_if *batman_if; + + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if (batman_if->soft_iface != soft_iface) + continue; + + if (batman_if->if_status == IF_ACTIVE) + goto out; + } + + batman_if = NULL; + +out: + if (batman_if) + hardif_hold(batman_if); + + rcu_read_unlock(); + return batman_if; +} + +static void update_primary_addr(struct bat_priv *bat_priv) +{ + struct vis_packet *vis_packet; + + vis_packet = (struct vis_packet *) + bat_priv->my_vis_info->skb_packet->data; + memcpy(vis_packet->vis_orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); + memcpy(vis_packet->sender_orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); +} + +static void set_primary_if(struct bat_priv *bat_priv, + struct batman_if *batman_if) +{ + struct batman_packet *batman_packet; + struct batman_if *old_if; + + if (batman_if) + hardif_hold(batman_if); + + old_if = bat_priv->primary_if; + bat_priv->primary_if = batman_if; + + if (old_if) + hardif_put(old_if); + + if (!bat_priv->primary_if) + return; + + batman_packet = (struct batman_packet *)(batman_if->packet_buff); + batman_packet->flags = 
PRIMARIES_FIRST_HOP; + batman_packet->ttl = TTL; + + update_primary_addr(bat_priv); + + /*** + * hacky trick to make sure that we send the HNA information via + * our new primary interface + */ + atomic_set(&bat_priv->hna_local_changed, 1); +} + +static bool hardif_is_iface_up(struct batman_if *batman_if) +{ + if (batman_if->net_dev->flags & IFF_UP) + return true; + + return false; +} + +static void update_mac_addresses(struct batman_if *batman_if) +{ + memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, + batman_if->net_dev->dev_addr, ETH_ALEN); + memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender, + batman_if->net_dev->dev_addr, ETH_ALEN); +} + - static void check_known_mac_addr(uint8_t *addr) ++static void check_known_mac_addr(struct net_device *net_dev) +{ + struct batman_if *batman_if; + + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if ((batman_if->if_status != IF_ACTIVE) && + (batman_if->if_status != IF_TO_BE_ACTIVATED)) + continue; + - if (!compare_orig(batman_if->net_dev->dev_addr, addr)) ++ if (batman_if->net_dev == net_dev) ++ continue; ++ ++ if (!compare_orig(batman_if->net_dev->dev_addr, ++ net_dev->dev_addr)) + continue; + + pr_warning("The newly added mac address (%pM) already exists " - "on: %s\n", addr, batman_if->net_dev->name); ++ "on: %s\n", net_dev->dev_addr, ++ batman_if->net_dev->name); + pr_warning("It is strongly recommended to keep mac addresses " + "unique to avoid problems!\n"); + } + rcu_read_unlock(); +} + +int hardif_min_mtu(struct net_device *soft_iface) +{ + struct bat_priv *bat_priv = netdev_priv(soft_iface); + struct batman_if *batman_if; + /* allow big frames if all devices are capable to do so + * (have MTU > 1500 + BAT_HEADER_LEN) */ + int min_mtu = ETH_DATA_LEN; + + if (atomic_read(&bat_priv->frag_enabled)) + goto out; + + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if ((batman_if->if_status != IF_ACTIVE) && + (batman_if->if_status != IF_TO_BE_ACTIVATED)) + continue; + + if (batman_if->soft_iface != soft_iface) + continue; + + min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN, + min_mtu); + } + rcu_read_unlock(); +out: + return min_mtu; +} + +/* adjusts the MTU if a new interface with a smaller MTU appeared. 
*/ +void update_min_mtu(struct net_device *soft_iface) +{ + int min_mtu; + + min_mtu = hardif_min_mtu(soft_iface); + if (soft_iface->mtu != min_mtu) + soft_iface->mtu = min_mtu; +} + +static void hardif_activate_interface(struct batman_if *batman_if) +{ + struct bat_priv *bat_priv; + + if (batman_if->if_status != IF_INACTIVE) + return; + + bat_priv = netdev_priv(batman_if->soft_iface); + + update_mac_addresses(batman_if); + batman_if->if_status = IF_TO_BE_ACTIVATED; + + /** + * the first active interface becomes our primary interface or + * the next active interface after the old primay interface was removed + */ + if (!bat_priv->primary_if) + set_primary_if(bat_priv, batman_if); + + bat_info(batman_if->soft_iface, "Interface activated: %s\n", + batman_if->net_dev->name); + + update_min_mtu(batman_if->soft_iface); + return; +} + +static void hardif_deactivate_interface(struct batman_if *batman_if) +{ + if ((batman_if->if_status != IF_ACTIVE) && + (batman_if->if_status != IF_TO_BE_ACTIVATED)) + return; + + batman_if->if_status = IF_INACTIVE; + + bat_info(batman_if->soft_iface, "Interface deactivated: %s\n", + batman_if->net_dev->name); + + update_min_mtu(batman_if->soft_iface); +} + +int hardif_enable_interface(struct batman_if *batman_if, char *iface_name) +{ + struct bat_priv *bat_priv; + struct batman_packet *batman_packet; + + if (batman_if->if_status != IF_NOT_IN_USE) + goto out; + + batman_if->soft_iface = dev_get_by_name(&init_net, iface_name); + + if (!batman_if->soft_iface) { + batman_if->soft_iface = softif_create(iface_name); + + if (!batman_if->soft_iface) + goto err; + + /* dev_get_by_name() increases the reference counter for us */ + dev_hold(batman_if->soft_iface); + } + + bat_priv = netdev_priv(batman_if->soft_iface); + batman_if->packet_len = BAT_PACKET_LEN; + batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC); + + if (!batman_if->packet_buff) { + bat_err(batman_if->soft_iface, "Can't add interface packet " + "(%s): out of memory\n", batman_if->net_dev->name); + goto err; + } + + batman_packet = (struct batman_packet *)(batman_if->packet_buff); + batman_packet->packet_type = BAT_PACKET; + batman_packet->version = COMPAT_VERSION; + batman_packet->flags = 0; + batman_packet->ttl = 2; + batman_packet->tq = TQ_MAX_VALUE; + batman_packet->num_hna = 0; + + batman_if->if_num = bat_priv->num_ifaces; + bat_priv->num_ifaces++; + batman_if->if_status = IF_INACTIVE; + orig_hash_add_if(batman_if, bat_priv->num_ifaces); + + batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); + batman_if->batman_adv_ptype.func = batman_skb_recv; + batman_if->batman_adv_ptype.dev = batman_if->net_dev; + hardif_hold(batman_if); + dev_add_pack(&batman_if->batman_adv_ptype); + + atomic_set(&batman_if->seqno, 1); + atomic_set(&batman_if->frag_seqno, 1); + bat_info(batman_if->soft_iface, "Adding interface: %s\n", + batman_if->net_dev->name); + + if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu < + ETH_DATA_LEN + BAT_HEADER_LEN) + bat_info(batman_if->soft_iface, + "The MTU of interface %s is too small (%i) to handle " + "the transport of batman-adv packets. Packets going " + "over this interface will be fragmented on layer2 " + "which could impact the performance. 
Setting the MTU " + "to %zi would solve the problem.\n", + batman_if->net_dev->name, batman_if->net_dev->mtu, + ETH_DATA_LEN + BAT_HEADER_LEN); + + if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu < + ETH_DATA_LEN + BAT_HEADER_LEN) + bat_info(batman_if->soft_iface, + "The MTU of interface %s is too small (%i) to handle " + "the transport of batman-adv packets. If you experience" + " problems getting traffic through try increasing the " + "MTU to %zi.\n", + batman_if->net_dev->name, batman_if->net_dev->mtu, + ETH_DATA_LEN + BAT_HEADER_LEN); + + if (hardif_is_iface_up(batman_if)) + hardif_activate_interface(batman_if); + else + bat_err(batman_if->soft_iface, "Not using interface %s " + "(retrying later): interface not active\n", + batman_if->net_dev->name); + + /* begin scheduling originator messages on that interface */ + schedule_own_packet(batman_if); + +out: + return 0; + +err: + return -ENOMEM; +} + +void hardif_disable_interface(struct batman_if *batman_if) +{ + struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); + + if (batman_if->if_status == IF_ACTIVE) + hardif_deactivate_interface(batman_if); + + if (batman_if->if_status != IF_INACTIVE) + return; + + bat_info(batman_if->soft_iface, "Removing interface: %s\n", + batman_if->net_dev->name); + dev_remove_pack(&batman_if->batman_adv_ptype); + hardif_put(batman_if); + + bat_priv->num_ifaces--; + orig_hash_del_if(batman_if, bat_priv->num_ifaces); + + if (batman_if == bat_priv->primary_if) { + struct batman_if *new_if; + + new_if = get_active_batman_if(batman_if->soft_iface); + set_primary_if(bat_priv, new_if); + + if (new_if) + hardif_put(new_if); + } + + kfree(batman_if->packet_buff); + batman_if->packet_buff = NULL; + batman_if->if_status = IF_NOT_IN_USE; + + /* delete all references to this batman_if */ + purge_orig_ref(bat_priv); + purge_outstanding_packets(bat_priv, batman_if); + dev_put(batman_if->soft_iface); + + /* nobody uses this interface anymore */ + if (!bat_priv->num_ifaces) + softif_destroy(batman_if->soft_iface); + + batman_if->soft_iface = NULL; +} + +static struct batman_if *hardif_add_interface(struct net_device *net_dev) +{ + struct batman_if *batman_if; + int ret; + + ret = is_valid_iface(net_dev); + if (ret != 1) + goto out; + + dev_hold(net_dev); + + batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); + if (!batman_if) { + pr_err("Can't add interface (%s): out of memory\n", + net_dev->name); + goto release_dev; + } + + ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev); + if (ret) + goto free_if; + + batman_if->if_num = -1; + batman_if->net_dev = net_dev; + batman_if->soft_iface = NULL; + batman_if->if_status = IF_NOT_IN_USE; + INIT_LIST_HEAD(&batman_if->list); + atomic_set(&batman_if->refcnt, 0); + hardif_hold(batman_if); + - check_known_mac_addr(batman_if->net_dev->dev_addr); ++ check_known_mac_addr(batman_if->net_dev); + + spin_lock(&if_list_lock); + list_add_tail_rcu(&batman_if->list, &if_list); + spin_unlock(&if_list_lock); + + /* extra reference for return */ + hardif_hold(batman_if); + return batman_if; + +free_if: + kfree(batman_if); +release_dev: + dev_put(net_dev); +out: + return NULL; +} + +static void hardif_remove_interface(struct batman_if *batman_if) +{ + /* first deactivate interface */ + if (batman_if->if_status != IF_NOT_IN_USE) + hardif_disable_interface(batman_if); + + if (batman_if->if_status != IF_NOT_IN_USE) + return; + + batman_if->if_status = IF_TO_BE_REMOVED; + + /* caller must take if_list_lock */ + list_del_rcu(&batman_if->list); + 
synchronize_rcu(); + sysfs_del_hardif(&batman_if->hardif_obj); + hardif_put(batman_if); +} + +void hardif_remove_interfaces(void) +{ + struct batman_if *batman_if, *batman_if_tmp; + + rtnl_lock(); + spin_lock(&if_list_lock); + list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) { + hardif_remove_interface(batman_if); + } + spin_unlock(&if_list_lock); + rtnl_unlock(); +} + +static int hard_if_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *net_dev = (struct net_device *)ptr; + struct batman_if *batman_if = get_batman_if_by_netdev(net_dev); + struct bat_priv *bat_priv; + + if (!batman_if && event == NETDEV_REGISTER) + batman_if = hardif_add_interface(net_dev); + + if (!batman_if) + goto out; + + switch (event) { + case NETDEV_UP: + hardif_activate_interface(batman_if); + break; + case NETDEV_GOING_DOWN: + case NETDEV_DOWN: + hardif_deactivate_interface(batman_if); + break; + case NETDEV_UNREGISTER: + spin_lock(&if_list_lock); + hardif_remove_interface(batman_if); + spin_unlock(&if_list_lock); + break; + case NETDEV_CHANGEMTU: + if (batman_if->soft_iface) + update_min_mtu(batman_if->soft_iface); + break; + case NETDEV_CHANGEADDR: + if (batman_if->if_status == IF_NOT_IN_USE) { + hardif_put(batman_if); + goto out; + } + - check_known_mac_addr(batman_if->net_dev->dev_addr); ++ check_known_mac_addr(batman_if->net_dev); + update_mac_addresses(batman_if); + + bat_priv = netdev_priv(batman_if->soft_iface); + if (batman_if == bat_priv->primary_if) + update_primary_addr(bat_priv); + break; + default: + break; + }; + hardif_put(batman_if); + +out: + return NOTIFY_DONE; +} + +/* receive a packet with the batman ethertype coming on a hard + * interface */ +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) +{ + struct bat_priv *bat_priv; + struct batman_packet *batman_packet; + struct batman_if *batman_if; + int ret; + + batman_if = container_of(ptype, struct batman_if, batman_adv_ptype); + skb = skb_share_check(skb, GFP_ATOMIC); + + /* skb was released by skb_share_check() */ + if (!skb) + goto err_out; + + /* packet should hold at least type and version */ + if (unlikely(!pskb_may_pull(skb, 2))) + goto err_free; + + /* expect a valid ethernet header here. */ + if (unlikely(skb->mac_len != sizeof(struct ethhdr) + || !skb_mac_header(skb))) + goto err_free; + + if (!batman_if->soft_iface) + goto err_free; + + bat_priv = netdev_priv(batman_if->soft_iface); + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto err_free; + + /* discard frames on not active interfaces */ + if (batman_if->if_status != IF_ACTIVE) + goto err_free; + + batman_packet = (struct batman_packet *)skb->data; + + if (batman_packet->version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_packet->version); + goto err_free; + } + + /* all receive handlers return whether they received or reused + * the supplied skb. if not, we have to free the skb. 
*/ + + switch (batman_packet->packet_type) { + /* batman originator packet */ + case BAT_PACKET: + ret = recv_bat_packet(skb, batman_if); + break; + + /* batman icmp packet */ + case BAT_ICMP: + ret = recv_icmp_packet(skb, batman_if); + break; + + /* unicast packet */ + case BAT_UNICAST: + ret = recv_unicast_packet(skb, batman_if); + break; + + /* fragmented unicast packet */ + case BAT_UNICAST_FRAG: + ret = recv_ucast_frag_packet(skb, batman_if); + break; + + /* broadcast packet */ + case BAT_BCAST: + ret = recv_bcast_packet(skb, batman_if); + break; + + /* vis packet */ + case BAT_VIS: + ret = recv_vis_packet(skb, batman_if); + break; + default: + ret = NET_RX_DROP; + } + + if (ret == NET_RX_DROP) + kfree_skb(skb); + + /* return NET_RX_SUCCESS in any case as we + * most probably dropped the packet for + * routing-logical reasons. */ + + return NET_RX_SUCCESS; + +err_free: + kfree_skb(skb); +err_out: + return NET_RX_DROP; +} + +struct notifier_block hard_if_notifier = { + .notifier_call = hard_if_event, +}; diff --combined drivers/staging/batman-adv/routing.c index 9010263,0000000..657b69e mode 100644,000000..100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c @@@ -1,1393 -1,0 +1,1389 @@@ +/* + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "routing.h" +#include "send.h" +#include "hash.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "icmp_socket.h" +#include "translation-table.h" +#include "originator.h" +#include "types.h" +#include "ring_buffer.h" +#include "vis.h" +#include "aggregation.h" +#include "unicast.h" + +void slide_own_bcast_window(struct batman_if *batman_if) +{ + struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); + HASHIT(hashit); + struct orig_node *orig_node; + TYPE_OF_WORD *word; + unsigned long flags; + + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + + while (hash_iterate(bat_priv->orig_hash, &hashit)) { + orig_node = hashit.bucket->data; + word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]); + + bit_get_packet(bat_priv, word, 1, 0); + orig_node->bcast_own_sum[batman_if->if_num] = + bit_packet_count(word); + } + + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); +} + +static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, + unsigned char *hna_buff, int hna_buff_len) +{ + if ((hna_buff_len != orig_node->hna_buff_len) || + ((hna_buff_len > 0) && + (orig_node->hna_buff_len > 0) && + (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) { + + if (orig_node->hna_buff_len > 0) + hna_global_del_orig(bat_priv, orig_node, + "originator changed hna"); + + if ((hna_buff_len > 0) && (hna_buff != NULL)) + hna_global_add_orig(bat_priv, orig_node, + hna_buff, hna_buff_len); + } +} + +static void update_route(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct neigh_node *neigh_node, + unsigned char *hna_buff, int hna_buff_len) +{ + /* route deleted */ + if ((orig_node->router != NULL) && (neigh_node == NULL)) { + + bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", + orig_node->orig); + hna_global_del_orig(bat_priv, orig_node, + "originator timed out"); + + /* route added */ + } else if ((orig_node->router == NULL) && (neigh_node != NULL)) { + + bat_dbg(DBG_ROUTES, bat_priv, + "Adding route towards: %pM (via %pM)\n", + orig_node->orig, neigh_node->addr); + hna_global_add_orig(bat_priv, orig_node, + hna_buff, hna_buff_len); + + /* route changed */ + } else { + bat_dbg(DBG_ROUTES, bat_priv, + "Changing route towards: %pM " + "(now via %pM - was via %pM)\n", + orig_node->orig, neigh_node->addr, + orig_node->router->addr); + } + + orig_node->router = neigh_node; +} + + +void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, + struct neigh_node *neigh_node, unsigned char *hna_buff, + int hna_buff_len) +{ + + if (orig_node == NULL) + return; + + if (orig_node->router != neigh_node) + update_route(bat_priv, orig_node, neigh_node, + hna_buff, hna_buff_len); + /* may be just HNA changed */ + else + update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); +} + +static int is_bidirectional_neigh(struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + struct batman_packet *batman_packet, + struct batman_if *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; + unsigned char total_count; + + if (orig_node == orig_neigh_node) { + list_for_each_entry(tmp_neigh_node, + &orig_node->neigh_list, + list) { + + if 
(compare_orig(tmp_neigh_node->addr, + orig_neigh_node->orig) && + (tmp_neigh_node->if_incoming == if_incoming)) + neigh_node = tmp_neigh_node; + } + + if (!neigh_node) + neigh_node = create_neighbor(orig_node, + orig_neigh_node, + orig_neigh_node->orig, + if_incoming); + /* create_neighbor failed, return 0 */ + if (!neigh_node) + return 0; + + neigh_node->last_valid = jiffies; + } else { + /* find packet count of corresponding one hop neighbor */ + list_for_each_entry(tmp_neigh_node, + &orig_neigh_node->neigh_list, list) { + + if (compare_orig(tmp_neigh_node->addr, + orig_neigh_node->orig) && + (tmp_neigh_node->if_incoming == if_incoming)) + neigh_node = tmp_neigh_node; + } + + if (!neigh_node) + neigh_node = create_neighbor(orig_neigh_node, + orig_neigh_node, + orig_neigh_node->orig, + if_incoming); + /* create_neighbor failed, return 0 */ + if (!neigh_node) + return 0; + } + + orig_node->last_valid = jiffies; + + /* pay attention to not get a value bigger than 100 % */ + total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] > + neigh_node->real_packet_count ? + neigh_node->real_packet_count : + orig_neigh_node->bcast_own_sum[if_incoming->if_num]); + + /* if we have too few packets (too less data) we set tq_own to zero */ + /* if we receive too few packets it is not considered bidirectional */ + if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || + (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) + orig_neigh_node->tq_own = 0; + else + /* neigh_node->real_packet_count is never zero as we + * only purge old information when getting new + * information */ + orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) / + neigh_node->real_packet_count; + + /* + * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does + * affect the nearly-symmetric links only a little, but + * punishes asymmetric links more. 
This will give a value + * between 0 and TQ_MAX_VALUE + */ + orig_neigh_node->tq_asym_penalty = + TQ_MAX_VALUE - + (TQ_MAX_VALUE * + (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) / + (TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE); + + batman_packet->tq = ((batman_packet->tq * + orig_neigh_node->tq_own * + orig_neigh_node->tq_asym_penalty) / + (TQ_MAX_VALUE * TQ_MAX_VALUE)); + + bat_dbg(DBG_BATMAN, bat_priv, + "bidirectional: " + "orig = %-15pM neigh = %-15pM => own_bcast = %2i, " + "real recv = %2i, local tq: %3i, asym_penalty: %3i, " + "total tq: %3i\n", + orig_node->orig, orig_neigh_node->orig, total_count, + neigh_node->real_packet_count, orig_neigh_node->tq_own, + orig_neigh_node->tq_asym_penalty, batman_packet->tq); + + /* if link has the minimum required transmission quality + * consider it bidirectional */ + if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) + return 1; + + return 0; +} + +static void update_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + struct batman_if *if_incoming, + unsigned char *hna_buff, int hna_buff_len, + char is_duplicate) +{ + struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; + int tmp_hna_buff_len; + + bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " + "Searching and updating originator entry of received packet\n"); + + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming)) { + neigh_node = tmp_neigh_node; + continue; + } + + if (is_duplicate) + continue; + + ring_buffer_set(tmp_neigh_node->tq_recv, + &tmp_neigh_node->tq_index, 0); + tmp_neigh_node->tq_avg = + ring_buffer_avg(tmp_neigh_node->tq_recv); + } + + if (!neigh_node) { + struct orig_node *orig_tmp; + + orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); + if (!orig_tmp) + return; + + neigh_node = create_neighbor(orig_node, orig_tmp, + ethhdr->h_source, if_incoming); + if (!neigh_node) + return; + } else + bat_dbg(DBG_BATMAN, bat_priv, + "Updating existing last-hop neighbor of originator\n"); + + orig_node->flags = batman_packet->flags; + neigh_node->last_valid = jiffies; + + ring_buffer_set(neigh_node->tq_recv, + &neigh_node->tq_index, + batman_packet->tq); + neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); + + if (!is_duplicate) { + orig_node->last_ttl = batman_packet->ttl; + neigh_node->last_ttl = batman_packet->ttl; + } + + tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? 
+ batman_packet->num_hna * ETH_ALEN : hna_buff_len); + + /* if this neighbor already is our next hop there is nothing + * to change */ + if (orig_node->router == neigh_node) + goto update_hna; + + /* if this neighbor does not offer a better TQ we won't consider it */ + if ((orig_node->router) && + (orig_node->router->tq_avg > neigh_node->tq_avg)) + goto update_hna; + + /* if the TQ is the same and the link not more symetric we + * won't consider it either */ + if ((orig_node->router) && + ((neigh_node->tq_avg == orig_node->router->tq_avg) && + (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num] + >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num]))) + goto update_hna; + + update_routes(bat_priv, orig_node, neigh_node, + hna_buff, tmp_hna_buff_len); + return; + +update_hna: + update_routes(bat_priv, orig_node, orig_node->router, + hna_buff, tmp_hna_buff_len); +} + +/* checks whether the host restarted and is in the protection time. + * returns: + * 0 if the packet is to be accepted + * 1 if the packet is to be ignored. + */ +static int window_protected(struct bat_priv *bat_priv, + int32_t seq_num_diff, + unsigned long *last_reset) +{ + if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) + || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { + if (time_after(jiffies, *last_reset + + msecs_to_jiffies(RESET_PROTECTION_MS))) { + + *last_reset = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "old packet received, start protection\n"); + + return 0; + } else + return 1; + } + return 0; +} + +/* processes a batman packet for all interfaces, adjusts the sequence number and + * finds out whether it is a duplicate. + * returns: + * 1 the packet is a duplicate + * 0 the packet has not yet been received + * -1 the packet is old and has been received while the seqno window + * was protected. Caller should drop it. + */ +static char count_real_packets(struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + struct batman_if *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct orig_node *orig_node; + struct neigh_node *tmp_neigh_node; + char is_duplicate = 0; + int32_t seq_diff; + int need_update = 0; + int set_mark; + + orig_node = get_orig_node(bat_priv, batman_packet->orig); + if (orig_node == NULL) + return 0; + + seq_diff = batman_packet->seqno - orig_node->last_real_seqno; + + /* signalize caller that the packet is to be dropped. */ + if (window_protected(bat_priv, seq_diff, + &orig_node->batman_seqno_reset)) + return -1; + + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + + is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_packet->seqno); + + if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming)) + set_mark = 1; + else + set_mark = 0; + + /* if the window moved, set the update flag. 
*/ + need_update |= bit_get_packet(bat_priv, + tmp_neigh_node->real_bits, + seq_diff, set_mark); + + tmp_neigh_node->real_packet_count = + bit_packet_count(tmp_neigh_node->real_bits); + } + + if (need_update) { + bat_dbg(DBG_BATMAN, bat_priv, + "updating last_seqno: old %d, new %d\n", + orig_node->last_real_seqno, batman_packet->seqno); + orig_node->last_real_seqno = batman_packet->seqno; + } + + return is_duplicate; +} + +/* copy primary address for bonding */ +static void mark_bonding_address(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + struct batman_packet *batman_packet) + +{ + if (batman_packet->flags & PRIMARIES_FIRST_HOP) + memcpy(orig_neigh_node->primary_addr, + orig_node->orig, ETH_ALEN); + + return; +} + +/* mark possible bond.candidates in the neighbor list */ +void update_bonding_candidates(struct bat_priv *bat_priv, + struct orig_node *orig_node) +{ + int candidates; + int interference_candidate; + int best_tq; + struct neigh_node *tmp_neigh_node, *tmp_neigh_node2; + struct neigh_node *first_candidate, *last_candidate; + + /* update the candidates for this originator */ + if (!orig_node->router) { + orig_node->bond.candidates = 0; + return; + } + + best_tq = orig_node->router->tq_avg; + + /* update bond.candidates */ + + candidates = 0; + + /* mark other nodes which also received "PRIMARIES FIRST HOP" packets + * as "bonding partner" */ + + /* first, zero the list */ + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + tmp_neigh_node->next_bond_candidate = NULL; + } + + first_candidate = NULL; + last_candidate = NULL; + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + + /* only consider if it has the same primary address ... */ + if (memcmp(orig_node->orig, + tmp_neigh_node->orig_node->primary_addr, + ETH_ALEN) != 0) + continue; + + /* ... and is good enough to be considered */ + if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) + continue; + + /* check if we have another candidate with the same + * mac address or interface. If we do, we won't + * select this candidate because of possible interference. */ + + interference_candidate = 0; + list_for_each_entry(tmp_neigh_node2, + &orig_node->neigh_list, list) { + + if (tmp_neigh_node2 == tmp_neigh_node) + continue; + + /* we only care if the other candidate is even + * considered as candidate. 
*/ + if (tmp_neigh_node2->next_bond_candidate == NULL) + continue; + + + if ((tmp_neigh_node->if_incoming == + tmp_neigh_node2->if_incoming) + || (memcmp(tmp_neigh_node->addr, + tmp_neigh_node2->addr, ETH_ALEN) == 0)) { + + interference_candidate = 1; + break; + } + } + /* don't care further if it is an interference candidate */ + if (interference_candidate) + continue; + + if (first_candidate == NULL) { + first_candidate = tmp_neigh_node; + tmp_neigh_node->next_bond_candidate = first_candidate; + } else + tmp_neigh_node->next_bond_candidate = last_candidate; + + last_candidate = tmp_neigh_node; + + candidates++; + } + + if (candidates > 0) { + first_candidate->next_bond_candidate = last_candidate; + orig_node->bond.selected = first_candidate; + } + + orig_node->bond.candidates = candidates; +} + +void receive_bat_packet(struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + unsigned char *hna_buff, int hna_buff_len, + struct batman_if *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batman_if *batman_if; + struct orig_node *orig_neigh_node, *orig_node; + char has_directlink_flag; + char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; + char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; + char is_duplicate; + uint32_t if_incoming_seqno; + + /* Silently drop when the batman packet is actually not a + * correct packet. + * + * This might happen if a packet is padded (e.g. Ethernet has a + * minimum frame length of 64 byte) and the aggregation interprets + * it as an additional length. + * + * TODO: A more sane solution would be to have a bit in the + * batman_packet to detect whether the packet is the last + * packet in an aggregation. Here we expect that the padding + * is always zero (or not 0x01) + */ + if (batman_packet->packet_type != BAT_PACKET) + return; + + /* could be changed by schedule_own_packet() */ + if_incoming_seqno = atomic_read(&if_incoming->seqno); + + has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0); + + is_single_hop_neigh = (compare_orig(ethhdr->h_source, + batman_packet->orig) ? 
1 : 0); + + bat_dbg(DBG_BATMAN, bat_priv, + "Received BATMAN packet via NB: %pM, IF: %s [%pM] " + "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, " + "TTL %d, V %d, IDF %d)\n", + ethhdr->h_source, if_incoming->net_dev->name, + if_incoming->net_dev->dev_addr, batman_packet->orig, + batman_packet->prev_sender, batman_packet->seqno, + batman_packet->tq, batman_packet->ttl, batman_packet->version, + has_directlink_flag); + + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if (batman_if->if_status != IF_ACTIVE) + continue; + + if (batman_if->soft_iface != if_incoming->soft_iface) + continue; + + if (compare_orig(ethhdr->h_source, + batman_if->net_dev->dev_addr)) + is_my_addr = 1; + + if (compare_orig(batman_packet->orig, + batman_if->net_dev->dev_addr)) + is_my_orig = 1; + + if (compare_orig(batman_packet->prev_sender, + batman_if->net_dev->dev_addr)) + is_my_oldorig = 1; + + if (compare_orig(ethhdr->h_source, broadcast_addr)) + is_broadcast = 1; + } + rcu_read_unlock(); + + if (batman_packet->version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_packet->version); + return; + } + + if (is_my_addr) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: received my own broadcast (sender: %pM" + ")\n", + ethhdr->h_source); + return; + } + + if (is_broadcast) { + bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " + "ignoring all packets with broadcast source addr (sender: %pM" + ")\n", ethhdr->h_source); + return; + } + + if (is_my_orig) { + TYPE_OF_WORD *word; + int offset; + + orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); + + if (!orig_neigh_node) + return; + + /* neighbor has to indicate direct link and it has to + * come via the corresponding interface */ + /* if received seqno equals last send seqno save new + * seqno for bidirectional check */ + if (has_directlink_flag && + compare_orig(if_incoming->net_dev->dev_addr, + batman_packet->orig) && + (batman_packet->seqno - if_incoming_seqno + 2 == 0)) { + offset = if_incoming->if_num * NUM_WORDS; + word = &(orig_neigh_node->bcast_own[offset]); + bit_mark(word, 0); + orig_neigh_node->bcast_own_sum[if_incoming->if_num] = + bit_packet_count(word); + } + + bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " + "originator packet from myself (via neighbor)\n"); + return; + } + + if (is_my_oldorig) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast echos (sender: " + "%pM)\n", ethhdr->h_source); + return; + } + + orig_node = get_orig_node(bat_priv, batman_packet->orig); + if (orig_node == NULL) + return; + + is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming); + + if (is_duplicate == -1) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: packet within seqno protection time " + "(sender: %pM)\n", ethhdr->h_source); + return; + } + + if (batman_packet->tq == 0) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: originator packet with tq equal 0\n"); + return; + } + + /* avoid temporary routing loops */ + if ((orig_node->router) && + (orig_node->router->orig_node->router) && + (compare_orig(orig_node->router->addr, + batman_packet->prev_sender)) && + !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) && + (compare_orig(orig_node->router->addr, + orig_node->router->orig_node->router->addr))) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast packets that " + "may make me loop (sender: %pM)\n", ethhdr->h_source); + return; + } + + /* if sender is a direct neighbor the sender mac equals 
+ * originator mac */ + orig_neigh_node = (is_single_hop_neigh ? + orig_node : + get_orig_node(bat_priv, ethhdr->h_source)); + if (orig_neigh_node == NULL) + return; + + /* drop packet if sender is not a direct neighbor and if we + * don't route towards it */ + if (!is_single_hop_neigh && + (orig_neigh_node->router == NULL)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: OGM via unknown neighbor!\n"); + return; + } + + is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node, + batman_packet, if_incoming); + + /* update ranking if it is not a duplicate or has the same + * seqno and similar ttl as the non-duplicate */ + if (is_bidirectional && + (!is_duplicate || + ((orig_node->last_real_seqno == batman_packet->seqno) && + (orig_node->last_ttl - 3 <= batman_packet->ttl)))) + update_orig(bat_priv, orig_node, ethhdr, batman_packet, + if_incoming, hna_buff, hna_buff_len, is_duplicate); + + mark_bonding_address(bat_priv, orig_node, + orig_neigh_node, batman_packet); + update_bonding_candidates(bat_priv, orig_node); + + /* is single hop (direct) neighbor */ + if (is_single_hop_neigh) { + + /* mark direct link on incoming interface */ + schedule_forward_packet(orig_node, ethhdr, batman_packet, + 1, hna_buff_len, if_incoming); + + bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " + "rebroadcast neighbor packet with direct link flag\n"); + return; + } + + /* multihop originator */ + if (!is_bidirectional) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: not received via bidirectional link\n"); + return; + } + + if (is_duplicate) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: duplicate packet received\n"); + return; + } + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: rebroadcast originator packet\n"); + schedule_forward_packet(orig_node, ethhdr, batman_packet, + 0, hna_buff_len, if_incoming); +} + +int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) +{ + struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); + struct ethhdr *ethhdr; + unsigned long flags; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet)))) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with broadcast indication but unicast recipient */ + if (!is_bcast(ethhdr->h_dest)) + return NET_RX_DROP; + + /* packet with broadcast sender address */ + if (is_bcast(ethhdr->h_source)) + return NET_RX_DROP; + + /* create a copy of the skb, if needed, to modify it. 
*/ + if (skb_cow(skb, 0) < 0) + return NET_RX_DROP; + + /* keep skb linear */ + if (skb_linearize(skb) < 0) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + receive_aggr_bat_packet(ethhdr, + skb->data, + skb_headlen(skb), + batman_if); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +static int recv_my_icmp_packet(struct bat_priv *bat_priv, + struct sk_buff *skb, size_t icmp_len) +{ + struct orig_node *orig_node; + struct icmp_packet_rr *icmp_packet; + struct ethhdr *ethhdr; + struct batman_if *batman_if; + int ret; + unsigned long flags; + uint8_t dstaddr[ETH_ALEN]; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* add data to device queue */ + if (icmp_packet->msg_type != ECHO_REQUEST) { + bat_socket_receive_packet(icmp_packet, icmp_len); + return NET_RX_DROP; + } + + if (!bat_priv->primary_if) + return NET_RX_DROP; + + /* answer echo request (ping) */ + /* get routing information */ + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, + icmp_packet->orig)); + ret = NET_RX_DROP; + + if ((orig_node != NULL) && + (orig_node->router != NULL)) { + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->router->if_incoming; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. */ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + return NET_RX_DROP; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); + memcpy(icmp_packet->orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); + icmp_packet->msg_type = ECHO_REPLY; + icmp_packet->ttl = TTL; + + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS; + + } else + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + return ret; +} + +static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, + struct sk_buff *skb, size_t icmp_len) +{ + struct orig_node *orig_node; + struct icmp_packet *icmp_packet; + struct ethhdr *ethhdr; + struct batman_if *batman_if; + int ret; + unsigned long flags; + uint8_t dstaddr[ETH_ALEN]; + + icmp_packet = (struct icmp_packet *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* send TTL exceeded if packet is an echo request (traceroute) */ + if (icmp_packet->msg_type != ECHO_REQUEST) { + pr_debug("Warning - can't forward icmp packet from %pM to " + "%pM: ttl exceeded\n", icmp_packet->orig, + icmp_packet->dst); + return NET_RX_DROP; + } + + if (!bat_priv->primary_if) + return NET_RX_DROP; + + /* get routing information */ + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + orig_node = ((struct orig_node *) + hash_find(bat_priv->orig_hash, icmp_packet->orig)); + ret = NET_RX_DROP; + + if ((orig_node != NULL) && + (orig_node->router != NULL)) { + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->router->if_incoming; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. 
*/ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + return NET_RX_DROP; + + icmp_packet = (struct icmp_packet *) skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); + memcpy(icmp_packet->orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); + icmp_packet->msg_type = TTL_EXCEEDED; + icmp_packet->ttl = TTL; + + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS; + + } else + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + return ret; +} + + +int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct icmp_packet_rr *icmp_packet; + struct ethhdr *ethhdr; + struct orig_node *orig_node; + struct batman_if *batman_if; + int hdr_size = sizeof(struct icmp_packet); + int ret; + unsigned long flags; + uint8_t dstaddr[ETH_ALEN]; + + /** + * we truncate all incoming icmp packets if they don't match our size + */ + if (skb->len >= sizeof(struct icmp_packet_rr)) + hdr_size = sizeof(struct icmp_packet_rr); + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with unicast indication but broadcast recipient */ + if (is_bcast(ethhdr->h_dest)) + return NET_RX_DROP; + + /* packet with broadcast sender address */ + if (is_bcast(ethhdr->h_source)) + return NET_RX_DROP; + + /* not for me */ + if (!is_my_mac(ethhdr->h_dest)) + return NET_RX_DROP; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + + /* add record route information if not full */ + if ((hdr_size == sizeof(struct icmp_packet_rr)) && + (icmp_packet->rr_cur < BAT_RR_LEN)) { + memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), + ethhdr->h_dest, ETH_ALEN); + icmp_packet->rr_cur++; + } + + /* packet for me */ + if (is_my_mac(icmp_packet->dst)) + return recv_my_icmp_packet(bat_priv, skb, hdr_size); + + /* TTL exceeded */ + if (icmp_packet->ttl < 2) + return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size); + + ret = NET_RX_DROP; + + /* get routing information */ + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + orig_node = ((struct orig_node *) + hash_find(bat_priv->orig_hash, icmp_packet->dst)); + + if ((orig_node != NULL) && + (orig_node->router != NULL)) { + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->router->if_incoming; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. */ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + return NET_RX_DROP; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* decrement ttl */ + icmp_packet->ttl--; + + /* route it */ + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS; + + } else + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + return ret; +} + +/* find a suitable router for this originator, and use + * bonding if possible. 
*/ - struct neigh_node *find_router(struct orig_node *orig_node, ++struct neigh_node *find_router(struct bat_priv *bat_priv, ++ struct orig_node *orig_node, + struct batman_if *recv_if) +{ - struct bat_priv *bat_priv; + struct orig_node *primary_orig_node; + struct orig_node *router_orig; + struct neigh_node *router, *first_candidate, *best_router; + static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + int bonding_enabled; + + if (!orig_node) + return NULL; + + if (!orig_node->router) + return NULL; + + /* without bonding, the first node should + * always choose the default router. */ + - if (!recv_if) - return orig_node->router; - - bat_priv = netdev_priv(recv_if->soft_iface); + bonding_enabled = atomic_read(&bat_priv->bonding_enabled); + - if (!bonding_enabled) ++ if ((!recv_if) && (!bonding_enabled)) + return orig_node->router; + + router_orig = orig_node->router->orig_node; + + /* if we have something in the primary_addr, we can search + * for a potential bonding candidate. */ + if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0) + return orig_node->router; + + /* find the orig_node which has the primary interface. might + * even be the same as our router_orig in many cases */ + + if (memcmp(router_orig->primary_addr, + router_orig->orig, ETH_ALEN) == 0) { + primary_orig_node = router_orig; + } else { + primary_orig_node = hash_find(bat_priv->orig_hash, + router_orig->primary_addr); + + if (!primary_orig_node) + return orig_node->router; + } + + /* with less than 2 candidates, we can't do any + * bonding and prefer the original router. */ + + if (primary_orig_node->bond.candidates < 2) + return orig_node->router; + + + /* all nodes between should choose a candidate which + * is is not on the interface where the packet came + * in. */ + first_candidate = primary_orig_node->bond.selected; + router = first_candidate; + + if (bonding_enabled) { + /* in the bonding case, send the packets in a round + * robin fashion over the remaining interfaces. */ + do { + /* recv_if == NULL on the first node. */ + if (router->if_incoming != recv_if) + break; + + router = router->next_bond_candidate; + } while (router != first_candidate); + + primary_orig_node->bond.selected = router->next_bond_candidate; + + } else { + /* if bonding is disabled, use the best of the + * remaining candidates which are not using + * this interface. */ + best_router = first_candidate; + + do { + /* recv_if == NULL on the first node. 
*/ + if ((router->if_incoming != recv_if) && + (router->tq_avg > best_router->tq_avg)) + best_router = router; + + router = router->next_bond_candidate; + } while (router != first_candidate); + + router = best_router; + } + + return router; +} + +static int check_unicast_packet(struct sk_buff *skb, int hdr_size) +{ + struct ethhdr *ethhdr; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return -1; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with unicast indication but broadcast recipient */ + if (is_bcast(ethhdr->h_dest)) + return -1; + + /* packet with broadcast sender address */ + if (is_bcast(ethhdr->h_source)) + return -1; + + /* not for me */ + if (!is_my_mac(ethhdr->h_dest)) + return -1; + + return 0; +} + +static int route_unicast_packet(struct sk_buff *skb, + struct batman_if *recv_if, int hdr_size) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct orig_node *orig_node; + struct neigh_node *router; + struct batman_if *batman_if; + uint8_t dstaddr[ETH_ALEN]; + unsigned long flags; + struct unicast_packet *unicast_packet; + struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + + unicast_packet = (struct unicast_packet *)skb->data; + + /* packet for me */ + if (is_my_mac(unicast_packet->dest)) { + interface_rx(recv_if->soft_iface, skb, hdr_size); + return NET_RX_SUCCESS; + } + + /* TTL exceeded */ + if (unicast_packet->ttl < 2) { + pr_debug("Warning - can't forward unicast packet from %pM to " + "%pM: ttl exceeded\n", ethhdr->h_source, + unicast_packet->dest); + return NET_RX_DROP; + } + + /* get routing information */ + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + orig_node = ((struct orig_node *) + hash_find(bat_priv->orig_hash, unicast_packet->dest)); + - router = find_router(orig_node, recv_if); ++ router = find_router(bat_priv, orig_node, recv_if); + + if (!router) { + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + return NET_RX_DROP; + } + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + + batman_if = router->if_incoming; + memcpy(dstaddr, router->addr, ETH_ALEN); + + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. 
*/ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + return NET_RX_DROP; + + unicast_packet = (struct unicast_packet *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* decrement ttl */ + unicast_packet->ttl--; + + /* route it */ + send_skb_packet(skb, batman_if, dstaddr); + + return NET_RX_SUCCESS; +} + +int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct unicast_packet *unicast_packet; + int hdr_size = sizeof(struct unicast_packet); + + if (check_unicast_packet(skb, hdr_size) < 0) + return NET_RX_DROP; + + unicast_packet = (struct unicast_packet *)skb->data; + + /* packet for me */ + if (is_my_mac(unicast_packet->dest)) { + interface_rx(recv_if->soft_iface, skb, hdr_size); + return NET_RX_SUCCESS; + } + + return route_unicast_packet(skb, recv_if, hdr_size); +} + +int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct unicast_frag_packet *unicast_packet; + struct orig_node *orig_node; + struct frag_packet_list_entry *tmp_frag_entry; + int hdr_size = sizeof(struct unicast_frag_packet); + unsigned long flags; + + if (check_unicast_packet(skb, hdr_size) < 0) + return NET_RX_DROP; + + unicast_packet = (struct unicast_frag_packet *)skb->data; + + /* packet for me */ + if (is_my_mac(unicast_packet->dest)) { + + spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + orig_node = ((struct orig_node *) + hash_find(bat_priv->orig_hash, unicast_packet->orig)); + + if (!orig_node) { + pr_debug("couldn't find orig node for fragmentation\n"); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, + flags); + return NET_RX_DROP; + } + + orig_node->last_frag_packet = jiffies; + + if (list_empty(&orig_node->frag_list) && + create_frag_buffer(&orig_node->frag_list)) { + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, + flags); + return NET_RX_DROP; + } + + tmp_frag_entry = + search_frag_packet(&orig_node->frag_list, + unicast_packet); + + if (!tmp_frag_entry) { + create_frag_entry(&orig_node->frag_list, skb); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, + flags); + return NET_RX_SUCCESS; + } + + skb = merge_frag_packet(&orig_node->frag_list, + tmp_frag_entry, skb); + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + if (!skb) + return NET_RX_DROP; + + interface_rx(recv_if->soft_iface, skb, hdr_size); + return NET_RX_SUCCESS; + } + + return route_unicast_packet(skb, recv_if, hdr_size); +} + + +int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct orig_node *orig_node; + struct bcast_packet *bcast_packet; + struct ethhdr *ethhdr; + int hdr_size = sizeof(struct bcast_packet); + int32_t seq_diff; + unsigned long flags; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with broadcast indication but unicast recipient */ + if (!is_bcast(ethhdr->h_dest)) + return NET_RX_DROP; + + /* packet with broadcast sender address */ + if (is_bcast(ethhdr->h_source)) + return NET_RX_DROP; + + /* ignore broadcasts sent by myself */ + if (is_my_mac(ethhdr->h_source)) + return NET_RX_DROP; + + bcast_packet = (struct bcast_packet *)skb->data; + + /* ignore broadcasts originated by myself */ + if (is_my_mac(bcast_packet->orig)) + return NET_RX_DROP; + + if (bcast_packet->ttl < 2) + return NET_RX_DROP; + + 
spin_lock_irqsave(&bat_priv->orig_hash_lock, flags); + orig_node = ((struct orig_node *) + hash_find(bat_priv->orig_hash, bcast_packet->orig)); + + if (orig_node == NULL) { + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + return NET_RX_DROP; + } + + /* check whether the packet is a duplicate */ + if (get_bit_status(orig_node->bcast_bits, + orig_node->last_bcast_seqno, + ntohl(bcast_packet->seqno))) { + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + return NET_RX_DROP; + } + + seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; + + /* check whether the packet is old and the host just restarted. */ + if (window_protected(bat_priv, seq_diff, + &orig_node->bcast_seqno_reset)) { + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + return NET_RX_DROP; + } + + /* mark broadcast in flood history, update window position + * if required. */ + if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) + orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); + + spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); + /* rebroadcast packet */ + add_bcast_packet_to_list(bat_priv, skb); + + /* broadcast for me */ + interface_rx(recv_if->soft_iface, skb, hdr_size); + + return NET_RX_SUCCESS; +} + +int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct vis_packet *vis_packet; + struct ethhdr *ethhdr; + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + int hdr_size = sizeof(struct vis_packet); + + /* keep skb linear */ + if (skb_linearize(skb) < 0) + return NET_RX_DROP; + + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return NET_RX_DROP; + + vis_packet = (struct vis_packet *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* not for me */ + if (!is_my_mac(ethhdr->h_dest)) + return NET_RX_DROP; + + /* ignore own packets */ + if (is_my_mac(vis_packet->vis_orig)) + return NET_RX_DROP; + + if (is_my_mac(vis_packet->sender_orig)) + return NET_RX_DROP; + + switch (vis_packet->vis_type) { + case VIS_TYPE_SERVER_SYNC: + receive_server_sync_packet(bat_priv, vis_packet, + skb_headlen(skb)); + break; + + case VIS_TYPE_CLIENT_UPDATE: + receive_client_update_packet(bat_priv, vis_packet, + skb_headlen(skb)); + break; + + default: /* ignore unknown packet */ + break; + } + + /* We take a copy of the data in the packet, so we should + always free the skbuf. */ + return NET_RX_DROP; +} diff --combined drivers/staging/batman-adv/routing.h index 06ea99d,92674c8..92674c8 --- a/drivers/staging/batman-adv/routing.h +++ b/drivers/staging/batman-adv/routing.h @@@ -38,8 -38,8 +38,8 @@@ int recv_ucast_frag_packet(struct sk_bu int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); - struct neigh_node *find_router(struct orig_node *orig_node, - struct batman_if *recv_if); + struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, struct batman_if *recv_if); void update_bonding_candidates(struct bat_priv *bat_priv, struct orig_node *orig_node);
diff --combined drivers/staging/batman-adv/unicast.c
index 0dac50d,0459413..0459413
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@@ -224,7 -224,7 +224,7 @@@ int unicast_send_skb(struct sk_buff *sk
  	if (!orig_node)
  		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
  
- 	router = find_router(orig_node, NULL);
 +	router = find_router(bat_priv, orig_node, NULL);
  
  	if (!router)
  		goto unlock;