The following commit has been merged in the linux branch:
commit ac622e833503b76e719747b27402489128a36c9b
Merge: ab8569a35b5827ff47582d71d7731725d27e071a d5e36e0dae1649e038cc37c3bc72e7ac3c962c2c
Author: Sven Eckelmann <sven.eckelmann@gmx.de>
Date:   Mon Aug 9 23:21:51 2010 +0200
Merge branch 'maint' into linux
Conflicts:
	drivers/staging/batman-adv/icmp_socket.c
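The resolution below drops the "FIXME: each orig_node->batman_if will be attached to a softif" lookup of the global soft_device in icmp_socket.c and instead hands the per-mesh bat_priv through debugfs: bat_socket_setup() now passes bat_priv to debugfs_create_file(), and bat_socket_open() recovers it via inode->i_private and stores it in the socket client. A minimal sketch of that pattern follows, for illustration only; struct example_priv, example_open() and example_setup() are stand-in names, not code from this commit.

/*
 * Sketch of the debugfs private-data pattern used by the conflict
 * resolution: the pointer given to debugfs_create_file() comes back
 * as inode->i_private in the open() handler.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

struct example_priv {
	int id;				/* stands in for the real bat_priv state */
};

static int example_open(struct inode *inode, struct file *file)
{
	/* debugfs stored the pointer passed at creation time here */
	struct example_priv *priv = inode->i_private;

	/* keep it around for the other fops, as icmp_socket.c does with
	 * socket_client->bat_priv */
	file->private_data = priv;
	return 0;
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.open  = example_open,
};

/* create the file under 'parent'; 'priv' must outlive the dentry */
static struct dentry *example_setup(struct dentry *parent,
				    struct example_priv *priv)
{
	return debugfs_create_file("example", S_IFREG | S_IRUSR | S_IWUSR,
				   parent, priv, &example_fops);
}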
diff --combined drivers/staging/batman-adv/icmp_socket.c index fc3d32c,0000000..3ae7dd2 mode 100644,000000..100644 --- a/drivers/staging/batman-adv/icmp_socket.c +++ b/drivers/staging/batman-adv/icmp_socket.c @@@ -1,334 -1,0 +1,338 @@@ +/* + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include <linux/debugfs.h> +#include <linux/slab.h> +#include "icmp_socket.h" +#include "send.h" +#include "types.h" +#include "hash.h" +#include "hard-interface.h" + + +static struct socket_client *socket_client_hash[256]; + +static void bat_socket_add_packet(struct socket_client *socket_client, + struct icmp_packet_rr *icmp_packet, + size_t icmp_len); + +void bat_socket_init(void) +{ + memset(socket_client_hash, 0, sizeof(socket_client_hash)); +} + +static int bat_socket_open(struct inode *inode, struct file *file) +{ + unsigned int i; + struct socket_client *socket_client; + + socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL); + + if (!socket_client) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) { + if (!socket_client_hash[i]) { + socket_client_hash[i] = socket_client; + break; + } + } + + if (i == ARRAY_SIZE(socket_client_hash)) { + pr_err("Error - can't add another packet client: " + "maximum number of clients reached\n"); + kfree(socket_client); + return -EXFULL; + } + + INIT_LIST_HEAD(&socket_client->queue_list); + socket_client->queue_len = 0; + socket_client->index = i; ++ socket_client->bat_priv = inode->i_private; + spin_lock_init(&socket_client->lock); + init_waitqueue_head(&socket_client->queue_wait); + + file->private_data = socket_client; + + inc_module_count(); + return 0; +} + +static int bat_socket_release(struct inode *inode, struct file *file) +{ + struct socket_client *socket_client = file->private_data; + struct socket_packet *socket_packet; + struct list_head *list_pos, *list_pos_tmp; + unsigned long flags; + + spin_lock_irqsave(&socket_client->lock, flags); + + /* for all packets in the queue ... 
*/ + list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) { + socket_packet = list_entry(list_pos, + struct socket_packet, list); + + list_del(list_pos); + kfree(socket_packet); + } + + socket_client_hash[socket_client->index] = NULL; + spin_unlock_irqrestore(&socket_client->lock, flags); + + kfree(socket_client); + dec_module_count(); + + return 0; +} + +static ssize_t bat_socket_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct socket_client *socket_client = file->private_data; + struct socket_packet *socket_packet; + size_t packet_len; + int error; + unsigned long flags; + + if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0)) + return -EAGAIN; + + if ((!buf) || (count < sizeof(struct icmp_packet))) + return -EINVAL; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + error = wait_event_interruptible(socket_client->queue_wait, + socket_client->queue_len); + + if (error) + return error; + + spin_lock_irqsave(&socket_client->lock, flags); + + socket_packet = list_first_entry(&socket_client->queue_list, + struct socket_packet, list); + list_del(&socket_packet->list); + socket_client->queue_len--; + + spin_unlock_irqrestore(&socket_client->lock, flags); + + error = __copy_to_user(buf, &socket_packet->icmp_packet, + socket_packet->icmp_len); + + packet_len = socket_packet->icmp_len; + kfree(socket_packet); + + if (error) + return -EFAULT; + + return packet_len; +} + +static ssize_t bat_socket_write(struct file *file, const char __user *buff, + size_t len, loff_t *off) +{ - /* FIXME: each orig_node->batman_if will be attached to a softif */ - struct bat_priv *bat_priv = netdev_priv(soft_device); + struct socket_client *socket_client = file->private_data; ++ struct bat_priv *bat_priv = socket_client->bat_priv; + struct icmp_packet_rr icmp_packet; + struct orig_node *orig_node; + struct batman_if *batman_if; + size_t packet_len = sizeof(struct icmp_packet); + uint8_t dstaddr[ETH_ALEN]; + unsigned long flags; + + if (len < sizeof(struct icmp_packet)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Error - can't send packet from char device: " + "invalid packet size\n"); + return -EINVAL; + } + ++ if (!bat_priv->primary_if) ++ return -EFAULT; ++ + if (len >= sizeof(struct icmp_packet_rr)) + packet_len = sizeof(struct icmp_packet_rr); + + if (!access_ok(VERIFY_READ, buff, packet_len)) + return -EFAULT; + + if (__copy_from_user(&icmp_packet, buff, packet_len)) + return -EFAULT; + + if (icmp_packet.packet_type != BAT_ICMP) { + bat_dbg(DBG_BATMAN, bat_priv, + "Error - can't send packet from char device: " + "got bogus packet type (expected: BAT_ICMP)\n"); + return -EINVAL; + } + + if (icmp_packet.msg_type != ECHO_REQUEST) { + bat_dbg(DBG_BATMAN, bat_priv, + "Error - can't send packet from char device: " + "got bogus message type (expected: ECHO_REQUEST)\n"); + return -EINVAL; + } + + icmp_packet.uid = socket_client->index; + + if (icmp_packet.version != COMPAT_VERSION) { + icmp_packet.msg_type = PARAMETER_PROBLEM; + icmp_packet.ttl = COMPAT_VERSION; + bat_socket_add_packet(socket_client, &icmp_packet, packet_len); + goto out; + } + + if (atomic_read(&module_state) != MODULE_ACTIVE) + goto dst_unreach; + + spin_lock_irqsave(&orig_hash_lock, flags); + orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst)); + + if (!orig_node) + goto unlock; + + if (!orig_node->router) + goto unlock; + + batman_if = orig_node->router->if_incoming; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + + 
spin_unlock_irqrestore(&orig_hash_lock, flags); + + if (!batman_if) + goto dst_unreach; + + if (batman_if->if_status != IF_ACTIVE) + goto dst_unreach; + - memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN); ++ memcpy(icmp_packet.orig, ++ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); + + if (packet_len == sizeof(struct icmp_packet_rr)) + memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN); + + send_raw_packet((unsigned char *)&icmp_packet, + packet_len, batman_if, dstaddr); + + goto out; + +unlock: + spin_unlock_irqrestore(&orig_hash_lock, flags); +dst_unreach: + icmp_packet.msg_type = DESTINATION_UNREACHABLE; + bat_socket_add_packet(socket_client, &icmp_packet, packet_len); +out: + return len; +} + +static unsigned int bat_socket_poll(struct file *file, poll_table *wait) +{ + struct socket_client *socket_client = file->private_data; + + poll_wait(file, &socket_client->queue_wait, wait); + + if (socket_client->queue_len > 0) + return POLLIN | POLLRDNORM; + + return 0; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = bat_socket_open, + .release = bat_socket_release, + .read = bat_socket_read, + .write = bat_socket_write, + .poll = bat_socket_poll, +}; + +int bat_socket_setup(struct bat_priv *bat_priv) +{ + struct dentry *d; + + if (!bat_priv->debug_dir) + goto err; + + d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, - bat_priv->debug_dir, NULL, &fops); ++ bat_priv->debug_dir, bat_priv, &fops); + if (d) + goto err; + + return 0; + +err: + return 1; +} + +static void bat_socket_add_packet(struct socket_client *socket_client, + struct icmp_packet_rr *icmp_packet, + size_t icmp_len) +{ + struct socket_packet *socket_packet; + unsigned long flags; + + socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC); + + if (!socket_packet) + return; + + INIT_LIST_HEAD(&socket_packet->list); + memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len); + socket_packet->icmp_len = icmp_len; + + spin_lock_irqsave(&socket_client->lock, flags); + + /* while waiting for the lock the socket_client could have been + * deleted */ + if (!socket_client_hash[icmp_packet->uid]) { + spin_unlock_irqrestore(&socket_client->lock, flags); + kfree(socket_packet); + return; + } + + list_add_tail(&socket_packet->list, &socket_client->queue_list); + socket_client->queue_len++; + + if (socket_client->queue_len > 100) { + socket_packet = list_first_entry(&socket_client->queue_list, + struct socket_packet, list); + + list_del(&socket_packet->list); + kfree(socket_packet); + socket_client->queue_len--; + } + + spin_unlock_irqrestore(&socket_client->lock, flags); + + wake_up(&socket_client->queue_wait); +} + +void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, + size_t icmp_len) +{ + struct socket_client *hash = socket_client_hash[icmp_packet->uid]; + + if (hash) + bat_socket_add_packet(hash, icmp_packet, icmp_len); +} diff --combined drivers/staging/batman-adv/main.c index 2686019,0000000..ef7c20a mode 100644,000000..100644 --- a/drivers/staging/batman-adv/main.c +++ b/drivers/staging/batman-adv/main.c @@@ -1,288 -1,0 +1,291 @@@ +/* + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "bat_sysfs.h" +#include "bat_debugfs.h" +#include "routing.h" +#include "send.h" +#include "originator.h" +#include "soft-interface.h" +#include "icmp_socket.h" +#include "translation-table.h" +#include "hard-interface.h" +#include "types.h" +#include "vis.h" +#include "hash.h" + +struct list_head if_list; +struct hlist_head forw_bat_list; +struct hlist_head forw_bcast_list; +struct hashtable_t *orig_hash; + +DEFINE_SPINLOCK(orig_hash_lock); +DEFINE_SPINLOCK(forw_bat_list_lock); +DEFINE_SPINLOCK(forw_bcast_list_lock); + +atomic_t bcast_queue_left; +atomic_t batman_queue_left; + +int16_t num_hna; + +struct net_device *soft_device; + +unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; +atomic_t module_state; + +static struct packet_type batman_adv_packet_type __read_mostly = { + .type = __constant_htons(ETH_P_BATMAN), + .func = batman_skb_recv, +}; + +struct workqueue_struct *bat_event_workqueue; + +static int __init batman_init(void) +{ + int retval; + + INIT_LIST_HEAD(&if_list); + INIT_HLIST_HEAD(&forw_bat_list); + INIT_HLIST_HEAD(&forw_bcast_list); + + atomic_set(&module_state, MODULE_INACTIVE); + + atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN); + atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN); + + /* the name should not be longer than 10 chars - see + * http://lwn.net/Articles/23634/ */ + bat_event_workqueue = create_singlethread_workqueue("bat_events"); + + if (!bat_event_workqueue) + return -ENOMEM; + + bat_socket_init(); + debugfs_init(); + + /* initialize layer 2 interface */ + soft_device = alloc_netdev(sizeof(struct bat_priv) , "bat%d", + interface_setup); + + if (!soft_device) { + pr_err("Unable to allocate the batman interface\n"); + goto end; + } + + retval = register_netdev(soft_device); + + if (retval < 0) { + pr_err("Unable to register the batman interface: %i\n", retval); + goto free_soft_device; + } + + retval = sysfs_add_meshif(soft_device); + + if (retval < 0) + goto unreg_soft_device; + + retval = debugfs_add_meshif(soft_device); + + if (retval < 0) + goto unreg_sysfs; + + register_netdevice_notifier(&hard_if_notifier); + dev_add_pack(&batman_adv_packet_type); + + pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) " + "loaded\n", SOURCE_VERSION, REVISION_VERSION_STR, + COMPAT_VERSION); + + return 0; + +unreg_sysfs: + sysfs_del_meshif(soft_device); +unreg_soft_device: + unregister_netdev(soft_device); + soft_device = NULL; + return -ENOMEM; + +free_soft_device: + free_netdev(soft_device); + soft_device = NULL; +end: + return -ENOMEM; +} + +static void __exit batman_exit(void) +{ + deactivate_module(); + + debugfs_destroy(); + unregister_netdevice_notifier(&hard_if_notifier); + hardif_remove_interfaces(); + + if (soft_device) { + debugfs_del_meshif(soft_device); + sysfs_del_meshif(soft_device); + unregister_netdev(soft_device); + soft_device = NULL; + } + + dev_remove_pack(&batman_adv_packet_type); + + destroy_workqueue(bat_event_workqueue); + bat_event_workqueue = NULL; +} + +/* activates the module, starts timer ... 
*/ +void activate_module(void) +{ + if (originator_init() < 1) + goto err; + + if (hna_local_init() < 1) + goto err; + + if (hna_global_init() < 1) + goto err; + + hna_local_add(soft_device->dev_addr); + + if (vis_init() < 1) + goto err; + + update_min_mtu(); + atomic_set(&module_state, MODULE_ACTIVE); + goto end; + +err: + pr_err("Unable to allocate memory for mesh information structures: " + "out of mem ?\n"); + deactivate_module(); +end: + return; +} + +/* shuts down the whole module.*/ +void deactivate_module(void) +{ + atomic_set(&module_state, MODULE_DEACTIVATING); + + purge_outstanding_packets(NULL); + flush_workqueue(bat_event_workqueue); + + vis_quit(); + + /* TODO: unregister BATMAN pack */ + + originator_free(); + + hna_local_free(); + hna_global_free(); + + synchronize_net(); + + synchronize_rcu(); + atomic_set(&module_state, MODULE_INACTIVE); +} + +void inc_module_count(void) +{ + try_module_get(THIS_MODULE); +} + +void dec_module_count(void) +{ + module_put(THIS_MODULE); +} + +int addr_to_string(char *buff, uint8_t *addr) +{ + return sprintf(buff, "%pM", addr); +} + +/* returns 1 if they are the same originator */ + +int compare_orig(void *data1, void *data2) +{ + return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); +} + +/* hashfunction to choose an entry in a hash table of given size */ +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ +int choose_orig(void *data, int32_t size) +{ + unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < 6; i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + +int is_my_mac(uint8_t *addr) +{ + struct batman_if *batman_if; ++ + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { - if ((batman_if->net_dev) && - (compare_orig(batman_if->net_dev->dev_addr, addr))) { ++ if (batman_if->if_status != IF_ACTIVE) ++ continue; ++ ++ if (compare_orig(batman_if->net_dev->dev_addr, addr)) { + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; + +} + +int is_bcast(uint8_t *addr) +{ + return (addr[0] == (uint8_t)0xff) && (addr[1] == (uint8_t)0xff); +} + +int is_mcast(uint8_t *addr) +{ + return *addr & 0x01; +} + +module_init(batman_init); +module_exit(batman_exit); + +MODULE_LICENSE("GPL"); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE); +#ifdef REVISION_VERSION +MODULE_VERSION(SOURCE_VERSION "-" REVISION_VERSION); +#else +MODULE_VERSION(SOURCE_VERSION); +#endif diff --combined drivers/staging/batman-adv/originator.c index 28bb627,0000000..de5a8c1 mode 100644,000000..100644 --- a/drivers/staging/batman-adv/originator.c +++ b/drivers/staging/batman-adv/originator.c @@@ -1,509 -1,0 +1,511 @@@ +/* + * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +/* increase the reference counter for this originator */ + +#include "main.h" +#include "originator.h" +#include "hash.h" +#include "translation-table.h" +#include "routing.h" +#include "hard-interface.h" + +static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig); + +static void start_purge_timer(void) +{ + queue_delayed_work(bat_event_workqueue, &purge_orig_wq, 1 * HZ); +} + +int originator_init(void) +{ + unsigned long flags; + if (orig_hash) + return 1; + + spin_lock_irqsave(&orig_hash_lock, flags); + orig_hash = hash_new(128, compare_orig, choose_orig); + + if (!orig_hash) + goto err; + + spin_unlock_irqrestore(&orig_hash_lock, flags); + start_purge_timer(); + return 1; + +err: + spin_unlock_irqrestore(&orig_hash_lock, flags); + return 0; +} + +struct neigh_node * +create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, + uint8_t *neigh, struct batman_if *if_incoming) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct neigh_node *neigh_node; + + bat_dbg(DBG_BATMAN, bat_priv, + "Creating new last-hop neighbor of originator\n"); + + neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC); + if (!neigh_node) + return NULL; + + INIT_LIST_HEAD(&neigh_node->list); + + memcpy(neigh_node->addr, neigh, ETH_ALEN); + neigh_node->orig_node = orig_neigh_node; + neigh_node->if_incoming = if_incoming; + + list_add_tail(&neigh_node->list, &orig_node->neigh_list); + return neigh_node; +} + +static void free_orig_node(void *data) +{ + struct list_head *list_pos, *list_pos_tmp; + struct neigh_node *neigh_node; + struct orig_node *orig_node = (struct orig_node *)data; + + /* for all neighbors towards this originator ... 
*/ + list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) { + neigh_node = list_entry(list_pos, struct neigh_node, list); + + list_del(list_pos); + kfree(neigh_node); + } + + hna_global_del_orig(orig_node, "originator timed out"); + + kfree(orig_node->bcast_own); + kfree(orig_node->bcast_own_sum); + kfree(orig_node); +} + +void originator_free(void) +{ + unsigned long flags; + + if (!orig_hash) + return; + + cancel_delayed_work_sync(&purge_orig_wq); + + spin_lock_irqsave(&orig_hash_lock, flags); + hash_delete(orig_hash, free_orig_node); + orig_hash = NULL; + spin_unlock_irqrestore(&orig_hash_lock, flags); +} + +/* this function finds or creates an originator entry for the given + * address if it does not exits */ +struct orig_node *get_orig_node(uint8_t *addr) +{ + /* FIXME: each batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct orig_node *orig_node; + struct hashtable_t *swaphash; + int size; + + orig_node = ((struct orig_node *)hash_find(orig_hash, addr)); + + if (orig_node != NULL) + return orig_node; + + bat_dbg(DBG_BATMAN, bat_priv, + "Creating new originator: %pM\n", addr); + + orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC); + if (!orig_node) + return NULL; + + INIT_LIST_HEAD(&orig_node->neigh_list); + + memcpy(orig_node->orig, addr, ETH_ALEN); + orig_node->router = NULL; + orig_node->hna_buff = NULL; + orig_node->bcast_seqno_reset = jiffies - 1 + - msecs_to_jiffies(RESET_PROTECTION_MS); + orig_node->batman_seqno_reset = jiffies - 1 + - msecs_to_jiffies(RESET_PROTECTION_MS); + + size = bat_priv->num_ifaces * sizeof(TYPE_OF_WORD) * NUM_WORDS; + + orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); + if (!orig_node->bcast_own) + goto free_orig_node; + + size = bat_priv->num_ifaces * sizeof(uint8_t); + orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC); + if (!orig_node->bcast_own_sum) + goto free_bcast_own; + + if (hash_add(orig_hash, orig_node) < 0) + goto free_bcast_own_sum; + + if (orig_hash->elements * 4 > orig_hash->size) { + swaphash = hash_resize(orig_hash, orig_hash->size * 2); + + if (swaphash == NULL) + bat_err(soft_device, + "Couldn't resize orig hash table\n"); + else + orig_hash = swaphash; + } + + return orig_node; +free_bcast_own_sum: + kfree(orig_node->bcast_own_sum); +free_bcast_own: + kfree(orig_node->bcast_own); +free_orig_node: + kfree(orig_node); + return NULL; +} + +static bool purge_orig_neighbors(struct orig_node *orig_node, + struct neigh_node **best_neigh_node) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct list_head *list_pos, *list_pos_tmp; + struct neigh_node *neigh_node; + bool neigh_purged = false; + + *best_neigh_node = NULL; + + /* for all neighbors towards this originator ... 
*/ + list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) { + neigh_node = list_entry(list_pos, struct neigh_node, list); + + if ((time_after(jiffies, + neigh_node->last_valid + PURGE_TIMEOUT * HZ)) || + (neigh_node->if_incoming->if_status == + IF_TO_BE_REMOVED)) { + + if (neigh_node->if_incoming->if_status == + IF_TO_BE_REMOVED) + bat_dbg(DBG_BATMAN, bat_priv, + "neighbor purge: originator %pM, " + "neighbor: %pM, iface: %s\n", + orig_node->orig, neigh_node->addr, + neigh_node->if_incoming->dev); + else + bat_dbg(DBG_BATMAN, bat_priv, + "neighbor timeout: originator %pM, " + "neighbor: %pM, last_valid: %lu\n", + orig_node->orig, neigh_node->addr, + (neigh_node->last_valid / HZ)); + + neigh_purged = true; + list_del(list_pos); + kfree(neigh_node); + } else { + if ((*best_neigh_node == NULL) || + (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) + *best_neigh_node = neigh_node; + } + } + return neigh_purged; +} + +static bool purge_orig_node(struct orig_node *orig_node) +{ + /* FIXME: each batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct neigh_node *best_neigh_node; + + if (time_after(jiffies, + orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) { + + bat_dbg(DBG_BATMAN, bat_priv, + "Originator timeout: originator %pM, last_valid %lu\n", + orig_node->orig, (orig_node->last_valid / HZ)); + return true; + } else { + if (purge_orig_neighbors(orig_node, &best_neigh_node)) { + update_routes(orig_node, best_neigh_node, + orig_node->hna_buff, + orig_node->hna_buff_len); + /* update bonding candidates, we could have lost + * some candidates. */ + update_bonding_candidates(bat_priv, orig_node); + } + } + + return false; +} + +void purge_orig(struct work_struct *work) +{ + HASHIT(hashit); + struct orig_node *orig_node; + unsigned long flags; + + spin_lock_irqsave(&orig_hash_lock, flags); + + /* for all origins... */ + while (hash_iterate(orig_hash, &hashit)) { + orig_node = hashit.bucket->data; + if (purge_orig_node(orig_node)) { + hash_remove_bucket(orig_hash, &hashit); + free_orig_node(orig_node); + } + } + + spin_unlock_irqrestore(&orig_hash_lock, flags); + + /* if work == NULL we were not called by the timer + * and thus do not need to re-arm the timer */ + if (work) + start_purge_timer(); +} + +int orig_seq_print_text(struct seq_file *seq, void *offset) +{ + HASHIT(hashit); + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct orig_node *orig_node; + struct neigh_node *neigh_node; + int batman_count = 0; + int last_seen_secs; + int last_seen_msecs; + unsigned long flags; + char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN]; + + if ((!bat_priv->primary_if) || + (bat_priv->primary_if->if_status != IF_ACTIVE)) { + if (!bat_priv->primary_if) + return seq_printf(seq, "BATMAN mesh %s disabled - " + "please specify interfaces to enable it\n", + net_dev->name); + + return seq_printf(seq, "BATMAN mesh %s " + "disabled - primary interface not active\n", + net_dev->name); + } + + rcu_read_lock(); + seq_printf(seq, "[B.A.T.M.A.N. 
adv %s%s, MainIF/MAC: %s/%s (%s)]\n", + SOURCE_VERSION, REVISION_VERSION_STR, + bat_priv->primary_if->dev, bat_priv->primary_if->addr_str, + net_dev->name); + seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", + "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", + "outgoingIF", "Potential nexthops"); + rcu_read_unlock(); + + spin_lock_irqsave(&orig_hash_lock, flags); + + while (hash_iterate(orig_hash, &hashit)) { + + orig_node = hashit.bucket->data; + + if (!orig_node->router) + continue; + + if (orig_node->router->tq_avg == 0) + continue; + + addr_to_string(orig_str, orig_node->orig); + addr_to_string(router_str, orig_node->router->addr); + last_seen_secs = jiffies_to_msecs(jiffies - + orig_node->last_valid) / 1000; + last_seen_msecs = jiffies_to_msecs(jiffies - + orig_node->last_valid) % 1000; + + seq_printf(seq, "%-17s %4i.%03is (%3i) %17s [%10s]:", + orig_str, last_seen_secs, last_seen_msecs, + orig_node->router->tq_avg, router_str, + orig_node->router->if_incoming->dev); + + list_for_each_entry(neigh_node, &orig_node->neigh_list, list) { + addr_to_string(orig_str, neigh_node->addr); + seq_printf(seq, " %17s (%3i)", orig_str, + neigh_node->tq_avg); + } + + seq_printf(seq, "\n"); + batman_count++; + } + + spin_unlock_irqrestore(&orig_hash_lock, flags); + + if ((batman_count == 0)) + seq_printf(seq, "No batman nodes in range ...\n"); + + return 0; +} + +static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) +{ + void *data_ptr; + + data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS, + GFP_ATOMIC); + if (!data_ptr) { + pr_err("Can't resize orig: out of memory\n"); + return -1; + } + + memcpy(data_ptr, orig_node->bcast_own, + (max_if_num - 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS); + kfree(orig_node->bcast_own); + orig_node->bcast_own = data_ptr; + + data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); + if (!data_ptr) { + pr_err("Can't resize orig: out of memory\n"); + return -1; + } + + memcpy(data_ptr, orig_node->bcast_own_sum, + (max_if_num - 1) * sizeof(uint8_t)); + kfree(orig_node->bcast_own_sum); + orig_node->bcast_own_sum = data_ptr; + + return 0; +} + +int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) +{ + struct orig_node *orig_node; ++ unsigned long flags; + HASHIT(hashit); + + /* resize all orig nodes because orig_node->bcast_own(_sum) depend on + * if_num */ - spin_lock(&orig_hash_lock); ++ spin_lock_irqsave(&orig_hash_lock, flags); + + while (hash_iterate(orig_hash, &hashit)) { + orig_node = hashit.bucket->data; + + if (orig_node_add_if(orig_node, max_if_num) == -1) + goto err; + } + - spin_unlock(&orig_hash_lock); ++ spin_unlock_irqrestore(&orig_hash_lock, flags); + return 0; + +err: - spin_unlock(&orig_hash_lock); ++ spin_unlock_irqrestore(&orig_hash_lock, flags); + return -ENOMEM; +} + +static int orig_node_del_if(struct orig_node *orig_node, + int max_if_num, int del_if_num) +{ + void *data_ptr = NULL; + int chunk_size; + + /* last interface was removed */ + if (max_if_num == 0) + goto free_bcast_own; + + chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS; + data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); + if (!data_ptr) { + pr_err("Can't resize orig: out of memory\n"); + return -1; + } + + /* copy first part */ + memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); + + /* copy second part */ + memcpy(data_ptr, + orig_node->bcast_own + ((del_if_num + 1) * chunk_size), + (max_if_num - del_if_num) * chunk_size); + +free_bcast_own: + kfree(orig_node->bcast_own); + orig_node->bcast_own = 
data_ptr; + + if (max_if_num == 0) + goto free_own_sum; + + data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); + if (!data_ptr) { + pr_err("Can't resize orig: out of memory\n"); + return -1; + } + + memcpy(data_ptr, orig_node->bcast_own_sum, + del_if_num * sizeof(uint8_t)); + + memcpy(data_ptr, + orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)), + (max_if_num - del_if_num) * sizeof(uint8_t)); + +free_own_sum: + kfree(orig_node->bcast_own_sum); + orig_node->bcast_own_sum = data_ptr; + + return 0; +} + +int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) +{ + struct batman_if *batman_if_tmp; + struct orig_node *orig_node; ++ unsigned long flags; + HASHIT(hashit); + int ret; + + /* resize all orig nodes because orig_node->bcast_own(_sum) depend on + * if_num */ - spin_lock(&orig_hash_lock); ++ spin_lock_irqsave(&orig_hash_lock, flags); + + while (hash_iterate(orig_hash, &hashit)) { + orig_node = hashit.bucket->data; + + ret = orig_node_del_if(orig_node, max_if_num, + batman_if->if_num); + + if (ret == -1) + goto err; + } + + /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ + rcu_read_lock(); + list_for_each_entry_rcu(batman_if_tmp, &if_list, list) { + if (batman_if_tmp->if_status == IF_NOT_IN_USE) + continue; + + if (batman_if == batman_if_tmp) + continue; + + if (batman_if_tmp->if_num > batman_if->if_num) + batman_if_tmp->if_num--; + } + rcu_read_unlock(); + + batman_if->if_num = -1; - spin_unlock(&orig_hash_lock); ++ spin_unlock_irqrestore(&orig_hash_lock, flags); + return 0; + +err: - spin_unlock(&orig_hash_lock); ++ spin_unlock_irqrestore(&orig_hash_lock, flags); + return -ENOMEM; +} diff --combined drivers/staging/batman-adv/routing.c index 066cc91,0000000..032195e mode 100644,000000..100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c @@@ -1,1305 -1,0 +1,1317 @@@ +/* + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "routing.h" +#include "send.h" +#include "hash.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "icmp_socket.h" +#include "translation-table.h" +#include "originator.h" +#include "types.h" +#include "ring_buffer.h" +#include "vis.h" +#include "aggregation.h" + +static DECLARE_WAIT_QUEUE_HEAD(thread_wait); + +void slide_own_bcast_window(struct batman_if *batman_if) +{ + HASHIT(hashit); + struct orig_node *orig_node; + TYPE_OF_WORD *word; + unsigned long flags; + + spin_lock_irqsave(&orig_hash_lock, flags); + + while (hash_iterate(orig_hash, &hashit)) { + orig_node = hashit.bucket->data; + word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]); + + bit_get_packet(word, 1, 0); + orig_node->bcast_own_sum[batman_if->if_num] = + bit_packet_count(word); + } + + spin_unlock_irqrestore(&orig_hash_lock, flags); +} + +static void update_HNA(struct orig_node *orig_node, + unsigned char *hna_buff, int hna_buff_len) +{ + if ((hna_buff_len != orig_node->hna_buff_len) || + ((hna_buff_len > 0) && + (orig_node->hna_buff_len > 0) && + (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) { + + if (orig_node->hna_buff_len > 0) + hna_global_del_orig(orig_node, + "originator changed hna"); + + if ((hna_buff_len > 0) && (hna_buff != NULL)) + hna_global_add_orig(orig_node, hna_buff, hna_buff_len); + } +} + +static void update_route(struct orig_node *orig_node, + struct neigh_node *neigh_node, + unsigned char *hna_buff, int hna_buff_len) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + + /* route deleted */ + if ((orig_node->router != NULL) && (neigh_node == NULL)) { + + bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", + orig_node->orig); + hna_global_del_orig(orig_node, "originator timed out"); + + /* route added */ + } else if ((orig_node->router == NULL) && (neigh_node != NULL)) { + + bat_dbg(DBG_ROUTES, bat_priv, + "Adding route towards: %pM (via %pM)\n", + orig_node->orig, neigh_node->addr); + hna_global_add_orig(orig_node, hna_buff, hna_buff_len); + + /* route changed */ + } else { + bat_dbg(DBG_ROUTES, bat_priv, + "Changing route towards: %pM " + "(now via %pM - was via %pM)\n", + orig_node->orig, neigh_node->addr, + orig_node->router->addr); + } + + orig_node->router = neigh_node; +} + + +void update_routes(struct orig_node *orig_node, + struct neigh_node *neigh_node, + unsigned char *hna_buff, int hna_buff_len) +{ + + if (orig_node == NULL) + return; + + if (orig_node->router != neigh_node) + update_route(orig_node, neigh_node, hna_buff, hna_buff_len); + /* may be just HNA changed */ + else + update_HNA(orig_node, hna_buff, hna_buff_len); +} + +static int is_bidirectional_neigh(struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + struct batman_packet *batman_packet, + struct batman_if *if_incoming) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; + unsigned char total_count; + + if (orig_node == orig_neigh_node) { + list_for_each_entry(tmp_neigh_node, + &orig_node->neigh_list, + list) { + + if (compare_orig(tmp_neigh_node->addr, + orig_neigh_node->orig) && + 
(tmp_neigh_node->if_incoming == if_incoming)) + neigh_node = tmp_neigh_node; + } + + if (!neigh_node) + neigh_node = create_neighbor(orig_node, + orig_neigh_node, + orig_neigh_node->orig, + if_incoming); + /* create_neighbor failed, return 0 */ + if (!neigh_node) + return 0; + + neigh_node->last_valid = jiffies; + } else { + /* find packet count of corresponding one hop neighbor */ + list_for_each_entry(tmp_neigh_node, + &orig_neigh_node->neigh_list, list) { + + if (compare_orig(tmp_neigh_node->addr, + orig_neigh_node->orig) && + (tmp_neigh_node->if_incoming == if_incoming)) + neigh_node = tmp_neigh_node; + } + + if (!neigh_node) + neigh_node = create_neighbor(orig_neigh_node, + orig_neigh_node, + orig_neigh_node->orig, + if_incoming); + /* create_neighbor failed, return 0 */ + if (!neigh_node) + return 0; + } + + orig_node->last_valid = jiffies; + + /* pay attention to not get a value bigger than 100 % */ + total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] > + neigh_node->real_packet_count ? + neigh_node->real_packet_count : + orig_neigh_node->bcast_own_sum[if_incoming->if_num]); + + /* if we have too few packets (too less data) we set tq_own to zero */ + /* if we receive too few packets it is not considered bidirectional */ + if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || + (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) + orig_neigh_node->tq_own = 0; + else + /* neigh_node->real_packet_count is never zero as we + * only purge old information when getting new + * information */ + orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) / + neigh_node->real_packet_count; + + /* + * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does + * affect the nearly-symmetric links only a little, but + * punishes asymmetric links more. 
This will give a value + * between 0 and TQ_MAX_VALUE + */ + orig_neigh_node->tq_asym_penalty = + TQ_MAX_VALUE - + (TQ_MAX_VALUE * + (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) / + (TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE); + + batman_packet->tq = ((batman_packet->tq * + orig_neigh_node->tq_own * + orig_neigh_node->tq_asym_penalty) / + (TQ_MAX_VALUE * TQ_MAX_VALUE)); + + bat_dbg(DBG_BATMAN, bat_priv, + "bidirectional: " + "orig = %-15pM neigh = %-15pM => own_bcast = %2i, " + "real recv = %2i, local tq: %3i, asym_penalty: %3i, " + "total tq: %3i\n", + orig_node->orig, orig_neigh_node->orig, total_count, + neigh_node->real_packet_count, orig_neigh_node->tq_own, + orig_neigh_node->tq_asym_penalty, batman_packet->tq); + + /* if link has the minimum required transmission quality + * consider it bidirectional */ + if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) + return 1; + + return 0; +} + +static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + struct batman_if *if_incoming, + unsigned char *hna_buff, int hna_buff_len, + char is_duplicate) +{ + /* FIXME: get bat_priv */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; + int tmp_hna_buff_len; + + bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " + "Searching and updating originator entry of received packet\n"); + + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming)) { + neigh_node = tmp_neigh_node; + continue; + } + + if (is_duplicate) + continue; + + ring_buffer_set(tmp_neigh_node->tq_recv, + &tmp_neigh_node->tq_index, 0); + tmp_neigh_node->tq_avg = + ring_buffer_avg(tmp_neigh_node->tq_recv); + } + + if (!neigh_node) { + struct orig_node *orig_tmp; + + orig_tmp = get_orig_node(ethhdr->h_source); + if (!orig_tmp) + return; + + neigh_node = create_neighbor(orig_node, + orig_tmp, + ethhdr->h_source, if_incoming); + if (!neigh_node) + return; + } else + bat_dbg(DBG_BATMAN, bat_priv, + "Updating existing last-hop neighbor of originator\n"); + + orig_node->flags = batman_packet->flags; + neigh_node->last_valid = jiffies; + + ring_buffer_set(neigh_node->tq_recv, + &neigh_node->tq_index, + batman_packet->tq); + neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); + + if (!is_duplicate) { + orig_node->last_ttl = batman_packet->ttl; + neigh_node->last_ttl = batman_packet->ttl; + } + + tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? 
+ batman_packet->num_hna * ETH_ALEN : hna_buff_len); + + /* if this neighbor already is our next hop there is nothing + * to change */ + if (orig_node->router == neigh_node) + goto update_hna; + + /* if this neighbor does not offer a better TQ we won't consider it */ + if ((orig_node->router) && + (orig_node->router->tq_avg > neigh_node->tq_avg)) + goto update_hna; + + /* if the TQ is the same and the link not more symetric we + * won't consider it either */ + if ((orig_node->router) && + ((neigh_node->tq_avg == orig_node->router->tq_avg) && + (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num] + >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num]))) + goto update_hna; + + update_routes(orig_node, neigh_node, hna_buff, tmp_hna_buff_len); + return; + +update_hna: + update_routes(orig_node, orig_node->router, hna_buff, tmp_hna_buff_len); +} + +/* checks whether the host restarted and is in the protection time. + * returns: + * 0 if the packet is to be accepted + * 1 if the packet is to be ignored. + */ +static int window_protected(int32_t seq_num_diff, + unsigned long *last_reset) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + + if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) + || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { + if (time_after(jiffies, *last_reset + + msecs_to_jiffies(RESET_PROTECTION_MS))) { + + *last_reset = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "old packet received, start protection\n"); + + return 0; + } else + return 1; + } + return 0; +} + +/* processes a batman packet for all interfaces, adjusts the sequence number and + * finds out whether it is a duplicate. + * returns: + * 1 the packet is a duplicate + * 0 the packet has not yet been received + * -1 the packet is old and has been received while the seqno window + * was protected. Caller should drop it. + */ +static char count_real_packets(struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + struct batman_if *if_incoming) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct orig_node *orig_node; + struct neigh_node *tmp_neigh_node; + char is_duplicate = 0; + int32_t seq_diff; + int need_update = 0; + int set_mark; + + orig_node = get_orig_node(batman_packet->orig); + if (orig_node == NULL) + return 0; + + seq_diff = batman_packet->seqno - orig_node->last_real_seqno; + + /* signalize caller that the packet is to be dropped. */ + if (window_protected(seq_diff, &orig_node->batman_seqno_reset)) + return -1; + + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + + is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_packet->seqno); + + if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming)) + set_mark = 1; + else + set_mark = 0; + + /* if the window moved, set the update flag. 
*/ + need_update |= bit_get_packet(tmp_neigh_node->real_bits, + seq_diff, set_mark); + + tmp_neigh_node->real_packet_count = + bit_packet_count(tmp_neigh_node->real_bits); + } + + if (need_update) { + bat_dbg(DBG_BATMAN, bat_priv, + "updating last_seqno: old %d, new %d\n", + orig_node->last_real_seqno, batman_packet->seqno); + orig_node->last_real_seqno = batman_packet->seqno; + } + + return is_duplicate; +} + +/* copy primary address for bonding */ +static void mark_bonding_address(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + struct batman_packet *batman_packet) + +{ + if (batman_packet->flags & PRIMARIES_FIRST_HOP) + memcpy(orig_neigh_node->primary_addr, + orig_node->orig, ETH_ALEN); + + return; +} + +/* mark possible bond.candidates in the neighbor list */ +void update_bonding_candidates(struct bat_priv *bat_priv, + struct orig_node *orig_node) +{ + int candidates; + int interference_candidate; + int best_tq; + struct neigh_node *tmp_neigh_node, *tmp_neigh_node2; + struct neigh_node *first_candidate, *last_candidate; + + /* update the candidates for this originator */ + if (!orig_node->router) { + orig_node->bond.candidates = 0; + return; + } + + best_tq = orig_node->router->tq_avg; + + /* update bond.candidates */ + + candidates = 0; + + /* mark other nodes which also received "PRIMARIES FIRST HOP" packets + * as "bonding partner" */ + + /* first, zero the list */ + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + tmp_neigh_node->next_bond_candidate = NULL; + } + + first_candidate = NULL; + last_candidate = NULL; + list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { + + /* only consider if it has the same primary address ... */ + if (memcmp(orig_node->orig, + tmp_neigh_node->orig_node->primary_addr, + ETH_ALEN) != 0) + continue; + + /* ... and is good enough to be considered */ + if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) + continue; + + /* check if we have another candidate with the same + * mac address or interface. If we do, we won't + * select this candidate because of possible interference. */ + + interference_candidate = 0; + list_for_each_entry(tmp_neigh_node2, + &orig_node->neigh_list, list) { + + if (tmp_neigh_node2 == tmp_neigh_node) + continue; + + /* we only care if the other candidate is even + * considered as candidate. 
*/ + if (tmp_neigh_node2->next_bond_candidate == NULL) + continue; + + + if ((tmp_neigh_node->if_incoming == + tmp_neigh_node2->if_incoming) + || (memcmp(tmp_neigh_node->addr, + tmp_neigh_node2->addr, ETH_ALEN) == 0)) { + + interference_candidate = 1; + break; + } + } + /* don't care further if it is an interference candidate */ + if (interference_candidate) + continue; + + if (first_candidate == NULL) { + first_candidate = tmp_neigh_node; + tmp_neigh_node->next_bond_candidate = first_candidate; + } else + tmp_neigh_node->next_bond_candidate = last_candidate; + + last_candidate = tmp_neigh_node; + + candidates++; + } + + if (candidates > 0) { + first_candidate->next_bond_candidate = last_candidate; + orig_node->bond.selected = first_candidate; + } + + orig_node->bond.candidates = candidates; +} + +void receive_bat_packet(struct ethhdr *ethhdr, + struct batman_packet *batman_packet, + unsigned char *hna_buff, int hna_buff_len, + struct batman_if *if_incoming) +{ + /* FIXME: each orig_node->batman_if will be attached to a softif */ + struct bat_priv *bat_priv = netdev_priv(soft_device); + struct batman_if *batman_if; + struct orig_node *orig_neigh_node, *orig_node; + char has_directlink_flag; + char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; + char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; + char is_duplicate; + uint32_t if_incoming_seqno; + + /* Silently drop when the batman packet is actually not a + * correct packet. + * + * This might happen if a packet is padded (e.g. Ethernet has a + * minimum frame length of 64 byte) and the aggregation interprets + * it as an additional length. + * + * TODO: A more sane solution would be to have a bit in the + * batman_packet to detect whether the packet is the last + * packet in an aggregation. Here we expect that the padding + * is always zero (or not 0x01) + */ + if (batman_packet->packet_type != BAT_PACKET) + return; + + /* could be changed by schedule_own_packet() */ + if_incoming_seqno = atomic_read(&if_incoming->seqno); + + has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0); + + is_single_hop_neigh = (compare_orig(ethhdr->h_source, + batman_packet->orig) ? 
1 : 0); + + bat_dbg(DBG_BATMAN, bat_priv, + "Received BATMAN packet via NB: %pM, IF: %s [%s] " + "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, " + "TTL %d, V %d, IDF %d)\n", + ethhdr->h_source, if_incoming->dev, if_incoming->addr_str, + batman_packet->orig, batman_packet->prev_sender, + batman_packet->seqno, batman_packet->tq, batman_packet->ttl, + batman_packet->version, has_directlink_flag); + + list_for_each_entry_rcu(batman_if, &if_list, list) { + if (batman_if->if_status != IF_ACTIVE) + continue; + + if (compare_orig(ethhdr->h_source, + batman_if->net_dev->dev_addr)) + is_my_addr = 1; + + if (compare_orig(batman_packet->orig, + batman_if->net_dev->dev_addr)) + is_my_orig = 1; + + if (compare_orig(batman_packet->prev_sender, + batman_if->net_dev->dev_addr)) + is_my_oldorig = 1; + + if (compare_orig(ethhdr->h_source, broadcast_addr)) + is_broadcast = 1; + } + + if (batman_packet->version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_packet->version); + return; + } + + if (is_my_addr) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: received my own broadcast (sender: %pM" + ")\n", + ethhdr->h_source); + return; + } + + if (is_broadcast) { + bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " + "ignoring all packets with broadcast source addr (sender: %pM" + ")\n", ethhdr->h_source); + return; + } + + if (is_my_orig) { + TYPE_OF_WORD *word; + int offset; + + orig_neigh_node = get_orig_node(ethhdr->h_source); + + if (!orig_neigh_node) + return; + + /* neighbor has to indicate direct link and it has to + * come via the corresponding interface */ + /* if received seqno equals last send seqno save new + * seqno for bidirectional check */ + if (has_directlink_flag && + compare_orig(if_incoming->net_dev->dev_addr, + batman_packet->orig) && + (batman_packet->seqno - if_incoming_seqno + 2 == 0)) { + offset = if_incoming->if_num * NUM_WORDS; + word = &(orig_neigh_node->bcast_own[offset]); + bit_mark(word, 0); + orig_neigh_node->bcast_own_sum[if_incoming->if_num] = + bit_packet_count(word); + } + + bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " + "originator packet from myself (via neighbor)\n"); + return; + } + + if (is_my_oldorig) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast echos (sender: " + "%pM)\n", ethhdr->h_source); + return; + } + + orig_node = get_orig_node(batman_packet->orig); + if (orig_node == NULL) + return; + + is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming); + + if (is_duplicate == -1) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: packet within seqno protection time " + "(sender: %pM)\n", ethhdr->h_source); + return; + } + + if (batman_packet->tq == 0) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: originator packet with tq equal 0\n"); + return; + } + + /* avoid temporary routing loops */ + if ((orig_node->router) && + (orig_node->router->orig_node->router) && + (compare_orig(orig_node->router->addr, + batman_packet->prev_sender)) && + !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) && + (compare_orig(orig_node->router->addr, + orig_node->router->orig_node->router->addr))) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast packets that " + "may make me loop (sender: %pM)\n", ethhdr->h_source); + return; + } + + /* if sender is a direct neighbor the sender mac equals + * originator mac */ + orig_neigh_node = (is_single_hop_neigh ? 
+ orig_node : get_orig_node(ethhdr->h_source)); + if (orig_neigh_node == NULL) + return; + + /* drop packet if sender is not a direct neighbor and if we + * don't route towards it */ + if (!is_single_hop_neigh && + (orig_neigh_node->router == NULL)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: OGM via unknown neighbor!\n"); + return; + } + + is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node, + batman_packet, if_incoming); + + /* update ranking if it is not a duplicate or has the same + * seqno and similar ttl as the non-duplicate */ + if (is_bidirectional && + (!is_duplicate || + ((orig_node->last_real_seqno == batman_packet->seqno) && + (orig_node->last_ttl - 3 <= batman_packet->ttl)))) + update_orig(orig_node, ethhdr, batman_packet, + if_incoming, hna_buff, hna_buff_len, is_duplicate); + + mark_bonding_address(bat_priv, orig_node, + orig_neigh_node, batman_packet); + update_bonding_candidates(bat_priv, orig_node); + + /* is single hop (direct) neighbor */ + if (is_single_hop_neigh) { + + /* mark direct link on incoming interface */ + schedule_forward_packet(orig_node, ethhdr, batman_packet, + 1, hna_buff_len, if_incoming); + + bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " + "rebroadcast neighbor packet with direct link flag\n"); + return; + } + + /* multihop originator */ + if (!is_bidirectional) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: not received via bidirectional link\n"); + return; + } + + if (is_duplicate) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: duplicate packet received\n"); + return; + } + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: rebroadcast originator packet\n"); + schedule_forward_packet(orig_node, ethhdr, batman_packet, + 0, hna_buff_len, if_incoming); +} + +int recv_bat_packet(struct sk_buff *skb, + struct batman_if *batman_if) +{ + struct ethhdr *ethhdr; + unsigned long flags; + struct sk_buff *skb_old; + + /* drop packet if it has not necessary minimum size */ + if (skb_headlen(skb) < sizeof(struct batman_packet)) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with broadcast indication but unicast recipient */ + if (!is_bcast(ethhdr->h_dest)) + return NET_RX_DROP; + + /* packet with broadcast sender address */ + if (is_bcast(ethhdr->h_source)) + return NET_RX_DROP; + + /* TODO: we use headlen instead of "length", because + * only this data is paged in. */ + + /* create a copy of the skb, if needed, to modify it. 
*/
+ if (!skb_clone_writable(skb, skb_headlen(skb))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ receive_aggr_bat_packet(ethhdr,
+ skb->data,
+ skb_headlen(skb),
+ batman_if);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
+{
++ /* FIXME: each batman_if will be attached to a softif */
++ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct orig_node *orig_node;
+ struct icmp_packet_rr *icmp_packet;
+ struct ethhdr *ethhdr;
+ struct sk_buff *skb_old;
+ struct batman_if *batman_if;
+ int ret;
+ unsigned long flags;
+ uint8_t dstaddr[ETH_ALEN];
+
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* add data to device queue */
+ if (icmp_packet->msg_type != ECHO_REQUEST) {
+ bat_socket_receive_packet(icmp_packet, icmp_len);
+ return NET_RX_DROP;
+ }
+
++ if (!bat_priv->primary_if)
++ return NET_RX_DROP;
++
+ /* answer echo request (ping) */
+ /* get routing information */
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)hash_find(orig_hash,
+ icmp_packet->orig));
+ ret = NET_RX_DROP;
+
+ if ((orig_node != NULL) &&
+ (orig_node->router != NULL)) {
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+ batman_if = orig_node->router->if_incoming;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ skb_old = NULL;
+ if (!skb_clone_writable(skb, icmp_len)) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
++ memcpy(icmp_packet->orig,
++ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+ icmp_packet->msg_type = ECHO_REPLY;
+ icmp_packet->ttl = TTL;
+
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
+
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ return ret;
+}
+
+static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
+{
++ /* FIXME: each batman_if will be attached to a softif */
++ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct orig_node *orig_node;
+ struct icmp_packet *icmp_packet;
+ struct ethhdr *ethhdr;
+ struct sk_buff *skb_old;
+ struct batman_if *batman_if;
+ int ret;
+ unsigned long flags;
+ uint8_t dstaddr[ETH_ALEN];
+
+ icmp_packet = (struct icmp_packet *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* send TTL exceeded if packet is an echo request (traceroute) */
+ if (icmp_packet->msg_type != ECHO_REQUEST) {
+ pr_warning("Warning - can't forward icmp packet from %pM to "
+ "%pM: ttl exceeded\n", icmp_packet->orig,
+ icmp_packet->dst);
+ return NET_RX_DROP;
+ }
+
++ if (!bat_priv->primary_if)
++ return NET_RX_DROP;
++
+ /* get routing information */
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)
+ hash_find(orig_hash, icmp_packet->orig));
+ ret = NET_RX_DROP;
+
+ if ((orig_node != NULL) &&
+ (orig_node->router != NULL)) {
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+ batman_if = orig_node->router->if_incoming;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, icmp_len)) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ icmp_packet = (struct icmp_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
++ memcpy(icmp_packet->orig,
++ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+ icmp_packet->msg_type = TTL_EXCEEDED;
+ icmp_packet->ttl = TTL;
+
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
+
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ return ret;
+}
+
+
+int recv_icmp_packet(struct sk_buff *skb)
+{
+ struct icmp_packet_rr *icmp_packet;
+ struct ethhdr *ethhdr;
+ struct orig_node *orig_node;
+ struct sk_buff *skb_old;
+ struct batman_if *batman_if;
+ int hdr_size = sizeof(struct icmp_packet);
+ int ret;
+ unsigned long flags;
+ uint8_t dstaddr[ETH_ALEN];
+
+ /**
+ * we truncate all incoming icmp packets if they don't match our size
+ */
+ if (skb_headlen(skb) >= sizeof(struct icmp_packet_rr))
+ hdr_size = sizeof(struct icmp_packet_rr);
+
+ /* drop packet if it has not necessary minimum size */
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* packet with unicast indication but broadcast recipient */
+ if (is_bcast(ethhdr->h_dest))
+ return NET_RX_DROP;
+
+ /* packet with broadcast sender address */
+ if (is_bcast(ethhdr->h_source))
+ return NET_RX_DROP;
+
+ /* not for me */
+ if (!is_my_mac(ethhdr->h_dest))
+ return NET_RX_DROP;
+
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+
+ /* add record route information if not full */
+ if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
+ (icmp_packet->rr_cur < BAT_RR_LEN)) {
+ memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
+ ethhdr->h_dest, ETH_ALEN);
+ icmp_packet->rr_cur++;
+ }
+
+ /* packet for me */
+ if (is_my_mac(icmp_packet->dst))
+ return recv_my_icmp_packet(skb, hdr_size);
+
+ /* TTL exceeded */
+ if (icmp_packet->ttl < 2)
+ return recv_icmp_ttl_exceeded(skb, hdr_size);
+
+ ret = NET_RX_DROP;
+
+ /* get routing information */
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)
+ hash_find(orig_hash, icmp_packet->dst));
+
+ if ((orig_node != NULL) &&
+ (orig_node->router != NULL)) {
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+ batman_if = orig_node->router->if_incoming;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, hdr_size)) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ /* decrement ttl */
+ icmp_packet->ttl--;
+
+ /* route it */
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
+
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ return ret;
+}
+
+/* find a suitable router for this originator, and use
+ * bonding if possible. */
+struct neigh_node *find_router(struct orig_node *orig_node,
+ struct batman_if *recv_if)
+{
+ /* FIXME: each orig_node->batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct orig_node *primary_orig_node;
+ struct orig_node *router_orig;
+ struct neigh_node *router, *first_candidate, *best_router;
+ static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+ int bonding_enabled;
+
+ if (!orig_node)
+ return NULL;
+
+ if (!orig_node->router)
+ return NULL;
+
+ /* without bonding, the first node should
+ * always choose the default router. */
+
+ bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
+ if (!bonding_enabled && (recv_if == NULL))
+ return orig_node->router;
+
+ router_orig = orig_node->router->orig_node;
+
+ /* if we have something in the primary_addr, we can search
+ * for a potential bonding candidate. */
+ if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
+ return orig_node->router;
+
+ /* find the orig_node which has the primary interface. might
+ * even be the same as our router_orig in many cases */
+
+ if (memcmp(router_orig->primary_addr,
+ router_orig->orig, ETH_ALEN) == 0) {
+ primary_orig_node = router_orig;
+ } else {
+ primary_orig_node = hash_find(orig_hash,
+ router_orig->primary_addr);
+ if (!primary_orig_node)
+ return orig_node->router;
+ }
+
+ /* with less than 2 candidates, we can't do any
+ * bonding and prefer the original router. */
+
+ if (primary_orig_node->bond.candidates < 2)
+ return orig_node->router;
+
+
+ /* all nodes between should choose a candidate which
+ * is is not on the interface where the packet came
+ * in. */
+ first_candidate = primary_orig_node->bond.selected;
+ router = first_candidate;
+
+ if (bonding_enabled) {
+ /* in the bonding case, send the packets in a round
+ * robin fashion over the remaining interfaces. */
+ do {
+ /* recv_if == NULL on the first node. */
+ if (router->if_incoming != recv_if)
+ break;
+
+ router = router->next_bond_candidate;
+ } while (router != first_candidate);
+
+ primary_orig_node->bond.selected = router->next_bond_candidate;
+
+ } else {
+ /* if bonding is disabled, use the best of the
+ * remaining candidates which are not using
+ * this interface. */
+ best_router = first_candidate;
+
+ do {
+ /* recv_if == NULL on the first node. */
+ if ((router->if_incoming != recv_if) &&
+ (router->tq_avg > best_router->tq_avg))
+ best_router = router;
+
+ router = router->next_bond_candidate;
+ } while (router != first_candidate);
+
+ router = best_router;
+ }
+
+ return router;
+}
+
+int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+{
+ struct unicast_packet *unicast_packet;
+ struct orig_node *orig_node;
+ struct neigh_node *router;
+ struct ethhdr *ethhdr;
+ struct batman_if *batman_if;
+ struct sk_buff *skb_old;
+ uint8_t dstaddr[ETH_ALEN];
+ int hdr_size = sizeof(struct unicast_packet);
+ unsigned long flags;
+
+ /* drop packet if it has not necessary minimum size */
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+ /* packet with unicast indication but broadcast recipient */
+ if (is_bcast(ethhdr->h_dest))
+ return NET_RX_DROP;
+
+ /* packet with broadcast sender address */
+ if (is_bcast(ethhdr->h_source))
+ return NET_RX_DROP;
+
+ /* not for me */
+ if (!is_my_mac(ethhdr->h_dest))
+ return NET_RX_DROP;
+
+ unicast_packet = (struct unicast_packet *) skb->data;
+
+ /* packet for me */
+ if (is_my_mac(unicast_packet->dest)) {
+ interface_rx(skb, hdr_size);
+ return NET_RX_SUCCESS;
+ }
+
+ /* TTL exceeded */
+ if (unicast_packet->ttl < 2) {
+ pr_warning("Warning - can't forward unicast packet from %pM to "
+ "%pM: ttl exceeded\n", ethhdr->h_source,
+ unicast_packet->dest);
+ return NET_RX_DROP;
+ }
+
+ /* get routing information */
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)
+ hash_find(orig_hash, unicast_packet->dest));
+
+ router = find_router(orig_node, recv_if);
+
+ if (!router) {
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
+ }
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+
+ batman_if = router->if_incoming;
+ memcpy(dstaddr, router->addr, ETH_ALEN);
+
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ unicast_packet = (struct unicast_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ /* decrement ttl */
+ unicast_packet->ttl--;
+
+ /* route it */
+ send_skb_packet(skb, batman_if, dstaddr);
+
+ return NET_RX_SUCCESS;
+}
+
+int recv_bcast_packet(struct sk_buff *skb)
+{
+ struct orig_node *orig_node;
+ struct bcast_packet *bcast_packet;
+ struct ethhdr *ethhdr;
+ int hdr_size = sizeof(struct bcast_packet);
+ int32_t seq_diff;
+ unsigned long flags;
+
+ /* drop packet if it has not necessary minimum size */
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* packet with broadcast indication but unicast recipient */
+ if (!is_bcast(ethhdr->h_dest))
+ return NET_RX_DROP;
+
+ /* packet with broadcast sender address */
+ if (is_bcast(ethhdr->h_source))
+ return NET_RX_DROP;
+
+ /* ignore broadcasts sent by myself */
+ if (is_my_mac(ethhdr->h_source))
+ return NET_RX_DROP;
+
+ bcast_packet = (struct bcast_packet *)skb->data;
+
+ /* ignore broadcasts originated by myself */
+ if (is_my_mac(bcast_packet->orig))
+ return NET_RX_DROP;
+
+ if (bcast_packet->ttl < 2)
+ return NET_RX_DROP;
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)
+ hash_find(orig_hash, bcast_packet->orig));
+
+ if (orig_node == NULL) {
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
+ }
+
+ /* check whether the packet is a duplicate */
+ if (get_bit_status(orig_node->bcast_bits,
+ orig_node->last_bcast_seqno,
+ ntohl(bcast_packet->seqno))) {
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
+ }
+
+ seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
+
+ /* check whether the packet is old and the host just restarted. */
+ if (window_protected(seq_diff, &orig_node->bcast_seqno_reset)) {
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
+ }
+
+ /* mark broadcast in flood history, update window position
+ * if required. */
+ if (bit_get_packet(orig_node->bcast_bits, seq_diff, 1))
+ orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
+
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ /* rebroadcast packet */
+ add_bcast_packet_to_list(skb);
+
+ /* broadcast for me */
+ interface_rx(skb, hdr_size);
+
+ return NET_RX_SUCCESS;
+}
+
+int recv_vis_packet(struct sk_buff *skb)
+{
+ struct vis_packet *vis_packet;
+ struct ethhdr *ethhdr;
+ struct bat_priv *bat_priv;
+ int hdr_size = sizeof(struct vis_packet);
+
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ vis_packet = (struct vis_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* not for me */
+ if (!is_my_mac(ethhdr->h_dest))
+ return NET_RX_DROP;
+
+ /* ignore own packets */
+ if (is_my_mac(vis_packet->vis_orig))
+ return NET_RX_DROP;
+
+ if (is_my_mac(vis_packet->sender_orig))
+ return NET_RX_DROP;
+
+ /* FIXME: each batman_if will be attached to a softif */
+ bat_priv = netdev_priv(soft_device);
+
+ switch (vis_packet->vis_type) {
+ case VIS_TYPE_SERVER_SYNC:
+ /* TODO: handle fragmented skbs properly */
+ receive_server_sync_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
+ break;
+
+ case VIS_TYPE_CLIENT_UPDATE:
+ /* TODO: handle fragmented skbs properly */
+ receive_client_update_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
+ break;
+
+ default: /* ignore unknown packet */
+ break;
+ }
+
+ /* We take a copy of the data in the packet, so we should
+ always free the skbuf. */
+ return NET_RX_DROP;
+}
diff --combined drivers/staging/batman-adv/types.h
index 21d0717,9aa9d36..9aa9d36
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@@ -126,6 -126,7 +126,7 @@@ struct socket_client
 unsigned char index;
 spinlock_t lock;
 wait_queue_head_t queue_wait;
+ struct bat_priv *bat_priv;
 };
struct socket_packet {