[commits] [git] linux integration branch, linux, updated. v2.6.36-rc3-487-ga24f6612

postmaster at open-mesh.net postmaster at open-mesh.net
Sat Sep 4 16:25:34 CEST 2010


The following commit has been merged in the linux branch:
commit a24f6612aba0bd608e7399c6bca18bd434f8f1a3
Merge: 2aa1367d56254c93e7d80bb4fd2a425f71798303 6b0d828f5b4033928fb300a9950111aa7533bb3f
Author: Sven Eckelmann <sven.eckelmann at gmx.de>
Date:   Sat Sep 4 16:20:01 2010 +0200

    Merge remote branch 'origin/next' into linux
    
    Conflicts:
    	drivers/staging/batman-adv/CHANGELOG
    	drivers/staging/batman-adv/Makefile.kbuild
    	drivers/staging/batman-adv/README
    	drivers/staging/batman-adv/compat.h
    	drivers/staging/batman-adv/routing.c
    	drivers/staging/batman-adv/send.c
    	drivers/staging/batman-adv/soft-interface.c

diff --combined drivers/staging/batman-adv/Makefile
index e9817b5,0000000..4b5c434
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/Makefile
+++ b/drivers/staging/batman-adv/Makefile
@@@ -1,22 -1,0 +1,22 @@@
 +#
 +# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 +#
 +# Marek Lindner, Simon Wunderlich
 +#
 +# This program is free software; you can redistribute it and/or
 +# modify it under the terms of version 2 of the GNU General Public
 +# License as published by the Free Software Foundation.
 +#
 +# This program is distributed in the hope that it will be useful, but
 +# WITHOUT ANY WARRANTY; without even the implied warranty of
 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 +# General Public License for more details.
 +#
 +# You should have received a copy of the GNU General Public License
 +# along with this program; if not, write to the Free Software
 +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 +# 02110-1301, USA
 +#
 +
 +obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
- batman-adv-objs := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o
++batman-adv-objs := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o unicast.o
diff --combined drivers/staging/batman-adv/README
index 7192b7f,0000000..3a975fc
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/README
+++ b/drivers/staging/batman-adv/README
@@@ -1,240 -1,0 +1,239 @@@
- [state: 12-06-2010]
++[state: 04-09-2010]
 +
 +BATMAN-ADV
 +----------
 +
 +Batman  advanced  is  a new approach to wireless networking which
 +does no longer operate on the IP basis. Unlike the batman daemon,
 +which  exchanges  information  using UDP packets and sets routing
 +tables, batman-advanced operates on ISO/OSI Layer 2 only and uses
 +and  routes  (or  better: bridges) Ethernet Frames. It emulates a
 +virtual network switch of all nodes participating.  Therefore all
 +nodes  appear  to be link local, thus all higher operating proto-
 +cols won't be affected by any changes within the network. You can
 +run almost any protocol above batman advanced, prominent examples
 +are: IPv4, IPv6, DHCP, IPX.
 +
 +Batman advanced was implemented as a Linux kernel driver  to  re-
 +duce the overhead to a minimum. It does not depend on any (other)
 +network driver, and can be used on wifi as well as ethernet  lan,
 +vpn,  etc ... (anything with ethernet-style layer 2).
 +
 +CONFIGURATION
 +-------------
 +
 +Load the batman-adv module into your kernel:
 +
 +# insmod batman-adv.ko
 +
 +The  module  is now waiting for activation. You must add some in-
 +terfaces on which batman can operate. After  loading  the  module
 +batman  advanced  will scan your systems interfaces to search for
 +compatible interfaces. Once found, it will create  subfolders  in
 +the /sys directories of each supported interface, e.g.
 +
 +# ls /sys/class/net/eth0/batman_adv/
 +# iface_status  mesh_iface
 +
 +If an interface does not have the "batman_adv" subfolder it prob-
 +ably is not supported. Not supported  interfaces  are:  loopback,
 +non-ethernet and batman's own interfaces.
 +
 +Note:  After the module was loaded it will continuously watch for
 +new interfaces to verify the compatibility. There is no  need  to
 +reload the module if you plug your USB wifi adapter into your ma-
 +chine after batman advanced was initially loaded.
 +
 +To activate a  given  interface  simply  write  "bat0"  into  its
 +"mesh_iface" file inside the batman_adv subfolder:
 +
 +# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
 +
 +Repeat  this step for all interfaces you wish to add.  Now batman
 +starts using/broadcasting on this/these interface(s).
 +
 +By reading the "iface_status" file you can check its status:
 +
 +# cat /sys/class/net/eth0/batman_adv/iface_status
 +# active
 +
 +To deactivate an interface you have  to  write  "none"  into  its
 +"mesh_iface" file:
 +
 +# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
 +
 +
 +All  mesh  wide  settings  can be found in batman's own interface
 +folder:
 +
 +#  ls  /sys/class/net/bat0/mesh/
- #  aggregate_ogm   originators        transtable_global  vis_mode
- #  orig_interval   transtable_local   vis_data
++#  aggregated_ogms  bonding  orig_interval  vis_mode
++
++
++There is a special folder for debugging informations:
++
++#  ls /sys/kernel/debug/batman_adv/bat0/
++#  originators  socket  transtable_global  transtable_local
++#  vis_data
 +
 +
 +Some of the files contain all sort of status information  regard-
 +ing  the  mesh  network.  For  example, you can view the table of
 +originators (mesh participants) with:
 +
- # cat /sys/class/net/bat0/mesh/originators
++# cat /sys/kernel/debug/batman_adv/bat0/originators
 +
 +Other files allow to change batman's behaviour to better fit your
 +requirements.  For instance, you can check the current originator
 +interval (value in milliseconds which determines how often batman
 +sends its broadcast packets):
 +
 +# cat /sys/class/net/bat0/mesh/orig_interval
- # status: 1000
++# 1000
 +
 +and also change its value:
 +
 +# echo 3000 > /sys/class/net/bat0/mesh/orig_interval
 +
 +In very mobile scenarios, you might want to adjust the originator
 +interval to a lower value. This will make the mesh  more  respon-
 +sive to topology changes, but will also increase the overhead.
 +
 +
 +USAGE
 +-----
 +
 +To  make use of your newly created mesh, batman advanced provides
 +a new interface "bat0" which you should use from this  point  on.
 +All  interfaces  added  to  batman  advanced are not relevant any
 +longer because batman handles them for you. Basically, one "hands
 +over" the data by using the batman interface and batman will make
 +sure it reaches its destination.
 +
 +The "bat0" interface can be used like any  other  regular  inter-
 +face.  It needs an IP address which can be either statically con-
 +figured or dynamically (by using DHCP or similar services):
 +
 +# NodeA: ifconfig bat0 192.168.0.1
 +# NodeB: ifconfig bat0 192.168.0.2
 +# NodeB: ping 192.168.0.1
 +
 +Note:  In  order to avoid problems remove all IP addresses previ-
 +ously assigned to interfaces now used by batman advanced, e.g.
 +
 +# ifconfig eth0 0.0.0.0
 +
 +
 +VISUALIZATION
 +-------------
 +
 +If you want topology visualization, at least one mesh  node  must
 +be configured as VIS-server:
 +
 +# echo "server" > /sys/class/net/bat0/mesh/vis_mode
 +
 +Each  node  is  either configured as "server" or as "client" (de-
 +fault: "client").  Clients send their topology data to the server
 +next to them, and server synchronize with other servers. If there
 +is no server configured (default) within the  mesh,  no  topology
 +information   will  be  transmitted.  With  these  "synchronizing
 +servers", there can be 1 or more vis servers sharing the same (or
 +at least very similar) data.
 +
 +When  configured  as  server,  you can get a topology snapshot of
 +your mesh:
 +
- # cat /sys/class/net/bat0/mesh/vis_data
++# cat /sys/kernel/debug/batman_adv/bat0/vis_data
 +
 +This raw output is intended to be easily parsable and convertable
 +with  other tools. Have a look at the batctl README if you want a
 +vis output in dot or json format for instance and how those  out-
 +puts could then be visualised in an image.
 +
 +The raw format consists of comma separated values per entry where
 +each entry is giving information about a  certain  source  inter-
 +face.  Each  entry can/has to have the following values:
 +-> "mac" - mac address of an originator's source interface
 +           (each line begins with it)
 +-> "TQ mac  value"  -  src mac's link quality towards mac address
 +                       of a neighbor originator's interface which
 +                       is being used for routing
 +-> "HNA mac" - HNA announced by source mac
 +-> "PRIMARY" - this  is a primary interface
 +-> "SEC mac" - secondary mac address of source
 +               (requires preceding PRIMARY)
 +
 +The TQ value has a range from 4 to 255 with 255 being  the  best.
 +The HNA entries are showing which hosts are connected to the mesh
 +via bat0 or being bridged into the mesh network.  The PRIMARY/SEC
 +values are only applied on primary interfaces
 +
 +
 +LOGGING/DEBUGGING
 +-----------------
 +
 +All error messages, warnings and information messages are sent to
 +the kernel log. Depending on your operating  system  distribution
 +this  can  be read in one of a number of ways. Try using the com-
 +mands: dmesg, logread, or looking in the files  /var/log/kern.log
 +or  /var/log/syslog.  All  batman-adv  messages are prefixed with
 +"batman-adv:" So to see just these messages try
 +
 +# dmesg | grep batman-adv
 +
 +When investigating problems with your mesh network  it  is  some-
 +times  necessary  to see more detail debug messages. This must be
 +enabled when compiling the batman-adv module. When building  bat-
 +man-adv  as  part of kernel, use "make menuconfig" and enable the
 +option "B.A.T.M.A.N. debugging".
 +
++Those additional  debug messages can be accessed  using a special
++file in debugfs
++
++# cat /sys/kernel/debug/batman_adv/bat0/log
++
 +The additional debug output is by default disabled. It can be en-
- abled  either  at kernel modules load time or during run time. To
- enable debug output at module load time, add the module parameter
- debug=<value>.  <value> can take one of four values.
++abled  during run time. Following log_levels are defined:
 +
 +0 - All  debug  output  disabled
 +1 - Enable messages related to routing / flooding / broadcasting
 +2 - Enable route or hna added / changed / deleted
 +3 - Enable all messages
 +
- e.g.
- 
- # modprobe batman-adv debug=2
- 
- will load the module and enable debug messages for when routes or
- HNAs change.
- 
- The debug output can also be changed at runtime  using  the  file
- /sys/module/batman-adv/parameters/debug. e.g.
- 
- # echo 2 > /sys/module/batman-adv/parameters/debug
++The debug output can be changed at runtime  using  the  file
++/sys/class/net/bat0/mesh/log_level. e.g.
 +
- enables debug messages for when routes or HNAs
++# echo 2 > /sys/class/net/bat0/mesh/log_level
 +
- The  debug  output  is sent to the kernel logs. So try dmesg, lo-
- gread, etc to see the debug messages.
++will enable debug messages for when routes or HNAs change.
 +
 +
 +BATCTL
 +------
 +
 +As batman advanced operates on layer 2 all hosts participating in
 +the  virtual switch are completely transparent for all  protocols
 +above layer 2. Therefore the common diagnosis tools do  not  work
 +as  expected.  To  overcome these problems batctl was created. At
 +the  moment the  batctl contains ping,  traceroute,  tcpdump  and
 +interfaces to the kernel module settings.
 +
 +For more information, please see the manpage (man batctl).
 +
 +batctl is available on http://www.open-mesh.org/
 +
 +
 +CONTACT
 +-------
 +
 +Please send us comments, experiences, questions, anything :)
 +
 +IRC:            #batman   on   irc.freenode.org
 +Mailing-list:   b.a.t.m.a.n at open-mesh.net (optional  subscription
 +          at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
 +
 +You can also contact the Authors:
 +
 +Marek  Lindner  <lindner_marek at yahoo.de>
 +Simon  Wunderlich  <siwu at hrz.tu-chemnitz.de>
 +
diff --combined drivers/staging/batman-adv/TODO
index 9c5aea2,0000000..1457c7f
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/TODO
+++ b/drivers/staging/batman-adv/TODO
@@@ -1,13 -1,0 +1,10 @@@
-  * Use hweight* for hamming weight calculation
-  * Save/cache packets direktly as skb instead of using a normal memory region
-    and copying it in a skb using send_raw_packet and similar functions
 + * Request a new review
 + * Process the comments from the review
 + * Move into mainline proper
 +
 +Please send all patches to:
 +	Marek Lindner <lindner_marek at yahoo.de>
 +	Simon Wunderlich <siwu at hrz.tu-chemnitz.de>
 +	Andrew Lunn <andrew at lunn.ch>
 +	b.a.t.m.a.n at lists.open-mesh.net
 +	Greg Kroah-Hartman <gregkh at suse.de>
diff --combined drivers/staging/batman-adv/aggregation.c
index 9862d16,46b9c2b..46b9c2b
--- a/drivers/staging/batman-adv/aggregation.c
+++ b/drivers/staging/batman-adv/aggregation.c
@@@ -39,7 -39,7 +39,7 @@@ static bool can_aggregate_with(struct b
  			       struct forw_packet *forw_packet)
  {
  	struct batman_packet *batman_packet =
- 		(struct batman_packet *)forw_packet->packet_buff;
+ 		(struct batman_packet *)forw_packet->skb->data;
  	int aggregated_bytes = forw_packet->packet_len + packet_len;
  
  	/**
@@@ -97,21 -97,19 +97,19 @@@
  
  #define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
  /* create a new aggregated packet and add this packet to it */
- static void new_aggregated_packet(unsigned char *packet_buff,
- 			   int packet_len,
- 			   unsigned long send_time,
- 			   bool direct_link,
- 			   struct batman_if *if_incoming,
- 			   int own_packet)
+ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
+ 				  unsigned long send_time, bool direct_link,
+ 				  struct batman_if *if_incoming,
+ 				  int own_packet)
  {
+ 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  	struct forw_packet *forw_packet_aggr;
  	unsigned long flags;
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
+ 	unsigned char *skb_buff;
  
  	/* own packet should always be scheduled */
  	if (!own_packet) {
- 		if (!atomic_dec_not_zero(&batman_queue_left)) {
+ 		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
  			bat_dbg(DBG_BATMAN, bat_priv,
  				"batman packet queue full\n");
  			return;
@@@ -121,27 -119,26 +119,26 @@@
  	forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
  	if (!forw_packet_aggr) {
  		if (!own_packet)
- 			atomic_inc(&batman_queue_left);
+ 			atomic_inc(&bat_priv->batman_queue_left);
  		return;
  	}
  
- 	forw_packet_aggr->packet_buff = kmalloc(MAX_AGGREGATION_BYTES,
- 						GFP_ATOMIC);
- 	if (!forw_packet_aggr->packet_buff) {
+ 	forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
+ 					      sizeof(struct ethhdr));
+ 	if (!forw_packet_aggr->skb) {
  		if (!own_packet)
- 			atomic_inc(&batman_queue_left);
+ 			atomic_inc(&bat_priv->batman_queue_left);
  		kfree(forw_packet_aggr);
  		return;
  	}
+ 	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
  
  	INIT_HLIST_NODE(&forw_packet_aggr->list);
  
+ 	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
  	forw_packet_aggr->packet_len = packet_len;
- 	memcpy(forw_packet_aggr->packet_buff,
- 	       packet_buff,
- 	       forw_packet_aggr->packet_len);
+ 	memcpy(skb_buff, packet_buff, packet_len);
  
- 	forw_packet_aggr->skb = NULL;
  	forw_packet_aggr->own = own_packet;
  	forw_packet_aggr->if_incoming = if_incoming;
  	forw_packet_aggr->num_packets = 0;
@@@ -153,9 -150,9 +150,9 @@@
  		forw_packet_aggr->direct_link_flags |= 1;
  
  	/* add new packet to packet list */
- 	spin_lock_irqsave(&forw_bat_list_lock, flags);
- 	hlist_add_head(&forw_packet_aggr->list, &forw_bat_list);
- 	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ 	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ 	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
+ 	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
  
  	/* start timer for this packet */
  	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
@@@ -171,8 -168,10 +168,10 @@@ static void aggregate(struct forw_packe
  		      int packet_len,
  		      bool direct_link)
  {
- 	memcpy((forw_packet_aggr->packet_buff + forw_packet_aggr->packet_len),
- 	       packet_buff, packet_len);
+ 	unsigned char *skb_buff;
+ 
+ 	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ 	memcpy(skb_buff, packet_buff, packet_len);
  	forw_packet_aggr->packet_len += packet_len;
  	forw_packet_aggr->num_packets++;
  
@@@ -199,11 -198,11 +198,11 @@@ void add_bat_packet_to_list(struct bat_
  	unsigned long flags;
  
  	/* find position for the packet in the forward queue */
- 	spin_lock_irqsave(&forw_bat_list_lock, flags);
+ 	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
  	/* own packets are not to be aggregated */
  	if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
- 		hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
- 				     list) {
+ 		hlist_for_each_entry(forw_packet_pos, tmp_node,
+ 				     &bat_priv->forw_bat_list, list) {
  			if (can_aggregate_with(batman_packet,
  					       packet_len,
  					       send_time,
@@@ -220,7 -219,7 +219,7 @@@
  	 * suitable aggregation packet found */
  	if (forw_packet_aggr == NULL) {
  		/* the following section can run without the lock */
- 		spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ 		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
  
  		/**
  		 * if we could not aggregate this packet with one of the others
@@@ -238,7 -237,7 +237,7 @@@
  		aggregate(forw_packet_aggr,
  			  packet_buff, packet_len,
  			  direct_link);
- 		spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ 		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
  	}
  }
  
diff --combined drivers/staging/batman-adv/bat_sysfs.c
index 05ca15a,0000000..0610169
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@@ -1,488 -1,0 +1,536 @@@
 +/*
 + * Copyright (C) 2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "bat_sysfs.h"
 +#include "translation-table.h"
 +#include "originator.h"
 +#include "hard-interface.h"
 +#include "vis.h"
 +
 +#define to_dev(obj)     container_of(obj, struct device, kobj)
 +
 +#define BAT_ATTR(_name, _mode, _show, _store)	\
 +struct bat_attribute bat_attr_##_name = {	\
 +	.attr = {.name = __stringify(_name),	\
 +		 .mode = _mode },		\
 +	.show   = _show,			\
 +	.store  = _store,			\
 +};
 +
 +static ssize_t show_aggr_ogms(struct kobject *kobj, struct attribute *attr,
 +			     char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
 +	int aggr_status = atomic_read(&bat_priv->aggregation_enabled);
 +
 +	return sprintf(buff, "%s\n",
 +		       aggr_status == 0 ? "disabled" : "enabled");
 +}
 +
 +static ssize_t store_aggr_ogms(struct kobject *kobj, struct attribute *attr,
 +			      char *buff, size_t count)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	int aggr_tmp = -1;
 +
 +	if (((count == 2) && (buff[0] == '1')) ||
 +	    (strncmp(buff, "enable", 6) == 0))
 +		aggr_tmp = 1;
 +
 +	if (((count == 2) && (buff[0] == '0')) ||
 +	    (strncmp(buff, "disable", 7) == 0))
 +		aggr_tmp = 0;
 +
 +	if (aggr_tmp < 0) {
 +		if (buff[count - 1] == '\n')
 +			buff[count - 1] = '\0';
 +
 +		bat_info(net_dev,
 +			 "Invalid parameter for 'aggregate OGM' setting"
 +			 "received: %s\n", buff);
 +		return -EINVAL;
 +	}
 +
 +	if (atomic_read(&bat_priv->aggregation_enabled) == aggr_tmp)
 +		return count;
 +
 +	bat_info(net_dev, "Changing aggregation from: %s to: %s\n",
 +		 atomic_read(&bat_priv->aggregation_enabled) == 1 ?
 +		 "enabled" : "disabled", aggr_tmp == 1 ? "enabled" :
 +		 "disabled");
 +
 +	atomic_set(&bat_priv->aggregation_enabled, (unsigned)aggr_tmp);
 +	return count;
 +}
 +
 +static ssize_t show_bond(struct kobject *kobj, struct attribute *attr,
 +			     char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
 +	int bond_status = atomic_read(&bat_priv->bonding_enabled);
 +
 +	return sprintf(buff, "%s\n",
 +		       bond_status == 0 ? "disabled" : "enabled");
 +}
 +
 +static ssize_t store_bond(struct kobject *kobj, struct attribute *attr,
 +			  char *buff, size_t count)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	int bonding_enabled_tmp = -1;
 +
 +	if (((count == 2) && (buff[0] == '1')) ||
 +	    (strncmp(buff, "enable", 6) == 0))
 +		bonding_enabled_tmp = 1;
 +
 +	if (((count == 2) && (buff[0] == '0')) ||
 +	    (strncmp(buff, "disable", 7) == 0))
 +		bonding_enabled_tmp = 0;
 +
 +	if (bonding_enabled_tmp < 0) {
 +		if (buff[count - 1] == '\n')
 +			buff[count - 1] = '\0';
 +
 +		bat_err(net_dev,
 +			"Invalid parameter for 'bonding' setting received: "
 +			"%s\n", buff);
 +		return -EINVAL;
 +	}
 +
 +	if (atomic_read(&bat_priv->bonding_enabled) == bonding_enabled_tmp)
 +		return count;
 +
 +	bat_info(net_dev, "Changing bonding from: %s to: %s\n",
 +		 atomic_read(&bat_priv->bonding_enabled) == 1 ?
 +		 "enabled" : "disabled",
 +		 bonding_enabled_tmp == 1 ? "enabled" : "disabled");
 +
 +	atomic_set(&bat_priv->bonding_enabled, (unsigned)bonding_enabled_tmp);
 +	return count;
 +}
 +
++static ssize_t show_frag(struct kobject *kobj, struct attribute *attr,
++			     char *buff)
++{
++	struct device *dev = to_dev(kobj->parent);
++	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
++	int frag_status = atomic_read(&bat_priv->frag_enabled);
++
++	return sprintf(buff, "%s\n",
++		       frag_status == 0 ? "disabled" : "enabled");
++}
++
++static ssize_t store_frag(struct kobject *kobj, struct attribute *attr,
++			  char *buff, size_t count)
++{
++	struct device *dev = to_dev(kobj->parent);
++	struct net_device *net_dev = to_net_dev(dev);
++	struct bat_priv *bat_priv = netdev_priv(net_dev);
++	int frag_enabled_tmp = -1;
++
++	if (((count == 2) && (buff[0] == '1')) ||
++	    (strncmp(buff, "enable", 6) == 0))
++		frag_enabled_tmp = 1;
++
++	if (((count == 2) && (buff[0] == '0')) ||
++	    (strncmp(buff, "disable", 7) == 0))
++		frag_enabled_tmp = 0;
++
++	if (frag_enabled_tmp < 0) {
++		if (buff[count - 1] == '\n')
++			buff[count - 1] = '\0';
++
++		bat_err(net_dev,
++			"Invalid parameter for 'fragmentation' setting on mesh"
++			"received: %s\n", buff);
++		return -EINVAL;
++	}
++
++	if (atomic_read(&bat_priv->frag_enabled) == frag_enabled_tmp)
++		return count;
++
++	bat_info(net_dev, "Changing fragmentation from: %s to: %s\n",
++		 atomic_read(&bat_priv->frag_enabled) == 1 ?
++		 "enabled" : "disabled",
++		 frag_enabled_tmp == 1 ? "enabled" : "disabled");
++
++	atomic_set(&bat_priv->frag_enabled, (unsigned)frag_enabled_tmp);
++	update_min_mtu(net_dev);
++	return count;
++}
++
 +static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
 +			     char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
 +	int vis_mode = atomic_read(&bat_priv->vis_mode);
 +
 +	return sprintf(buff, "%s\n",
 +		       vis_mode == VIS_TYPE_CLIENT_UPDATE ?
 +							"client" : "server");
 +}
 +
 +static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
 +			      char *buff, size_t count)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	unsigned long val;
 +	int ret, vis_mode_tmp = -1;
 +
 +	ret = strict_strtoul(buff, 10, &val);
 +
 +	if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
 +	    (strncmp(buff, "client", 6) == 0) ||
 +	    (strncmp(buff, "off", 3) == 0))
 +		vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
 +
 +	if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
 +	    (strncmp(buff, "server", 6) == 0))
 +		vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
 +
 +	if (vis_mode_tmp < 0) {
 +		if (buff[count - 1] == '\n')
 +			buff[count - 1] = '\0';
 +
 +		bat_info(net_dev,
 +			 "Invalid parameter for 'vis mode' setting received: "
 +			 "%s\n", buff);
 +		return -EINVAL;
 +	}
 +
 +	if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
 +		return count;
 +
 +	bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
 +		 atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
 +		 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
 +		 "client" : "server");
 +
 +	atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
 +	return count;
 +}
 +
 +static ssize_t show_orig_interval(struct kobject *kobj, struct attribute *attr,
 +				 char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
 +
 +	return sprintf(buff, "%i\n",
 +		       atomic_read(&bat_priv->orig_interval));
 +}
 +
 +static ssize_t store_orig_interval(struct kobject *kobj, struct attribute *attr,
 +				  char *buff, size_t count)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	unsigned long orig_interval_tmp;
 +	int ret;
 +
 +	ret = strict_strtoul(buff, 10, &orig_interval_tmp);
 +	if (ret) {
 +		bat_info(net_dev, "Invalid parameter for 'orig_interval' "
 +			 "setting received: %s\n", buff);
 +		return -EINVAL;
 +	}
 +
 +	if (orig_interval_tmp < JITTER * 2) {
 +		bat_info(net_dev, "New originator interval too small: %li "
 +			 "(min: %i)\n", orig_interval_tmp, JITTER * 2);
 +		return -EINVAL;
 +	}
 +
 +	if (atomic_read(&bat_priv->orig_interval) == orig_interval_tmp)
 +		return count;
 +
 +	bat_info(net_dev, "Changing originator interval from: %i to: %li\n",
 +		 atomic_read(&bat_priv->orig_interval),
 +		 orig_interval_tmp);
 +
 +	atomic_set(&bat_priv->orig_interval, orig_interval_tmp);
 +	return count;
 +}
 +
 +#ifdef CONFIG_BATMAN_ADV_DEBUG
 +static ssize_t show_log_level(struct kobject *kobj, struct attribute *attr,
 +			     char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
 +	int log_level = atomic_read(&bat_priv->log_level);
 +
 +	return sprintf(buff, "%d\n", log_level);
 +}
 +
 +static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
 +			      char *buff, size_t count)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	unsigned long log_level_tmp;
 +	int ret;
 +
 +	ret = strict_strtoul(buff, 10, &log_level_tmp);
 +	if (ret) {
 +		bat_info(net_dev, "Invalid parameter for 'log_level' "
 +			 "setting received: %s\n", buff);
 +		return -EINVAL;
 +	}
 +
 +	if (log_level_tmp > 3) {
 +		bat_info(net_dev, "New log level too big: %li "
 +			 "(max: %i)\n", log_level_tmp, 3);
 +		return -EINVAL;
 +	}
 +
 +	if (atomic_read(&bat_priv->log_level) == log_level_tmp)
 +		return count;
 +
 +	bat_info(net_dev, "Changing log level from: %i to: %li\n",
 +		 atomic_read(&bat_priv->log_level),
 +		 log_level_tmp);
 +
 +	atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
 +	return count;
 +}
 +#endif
 +
 +static BAT_ATTR(aggregated_ogms, S_IRUGO | S_IWUSR,
 +		show_aggr_ogms, store_aggr_ogms);
 +static BAT_ATTR(bonding, S_IRUGO | S_IWUSR, show_bond, store_bond);
++static BAT_ATTR(fragmentation, S_IRUGO | S_IWUSR, show_frag, store_frag);
 +static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
 +static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
 +		show_orig_interval, store_orig_interval);
 +#ifdef CONFIG_BATMAN_ADV_DEBUG
 +static BAT_ATTR(log_level, S_IRUGO | S_IWUSR, show_log_level, store_log_level);
 +#endif
 +
 +static struct bat_attribute *mesh_attrs[] = {
 +	&bat_attr_aggregated_ogms,
 +	&bat_attr_bonding,
++	&bat_attr_fragmentation,
 +	&bat_attr_vis_mode,
 +	&bat_attr_orig_interval,
 +#ifdef CONFIG_BATMAN_ADV_DEBUG
 +	&bat_attr_log_level,
 +#endif
 +	NULL,
 +};
 +
 +int sysfs_add_meshif(struct net_device *dev)
 +{
 +	struct kobject *batif_kobject = &dev->dev.kobj;
 +	struct bat_priv *bat_priv = netdev_priv(dev);
 +	struct bat_attribute **bat_attr;
 +	int err;
 +
- 	/* FIXME: should be done in the general mesh setup
- 		  routine as soon as we have it */
- 	atomic_set(&bat_priv->aggregation_enabled, 1);
- 	atomic_set(&bat_priv->bonding_enabled, 0);
- 	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
- 	atomic_set(&bat_priv->orig_interval, 1000);
- 	atomic_set(&bat_priv->log_level, 0);
- 
- 	bat_priv->primary_if = NULL;
- 	bat_priv->num_ifaces = 0;
- 
 +	bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
 +						    batif_kobject);
 +	if (!bat_priv->mesh_obj) {
 +		bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
 +			SYSFS_IF_MESH_SUBDIR);
 +		goto out;
 +	}
 +
 +	for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
 +		err = sysfs_create_file(bat_priv->mesh_obj,
 +					&((*bat_attr)->attr));
 +		if (err) {
 +			bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
 +				dev->name, SYSFS_IF_MESH_SUBDIR,
 +				((*bat_attr)->attr).name);
 +			goto rem_attr;
 +		}
 +	}
 +
 +	return 0;
 +
 +rem_attr:
 +	for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
 +		sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
 +
 +	kobject_put(bat_priv->mesh_obj);
 +	bat_priv->mesh_obj = NULL;
 +out:
 +	return -ENOMEM;
 +}
 +
 +void sysfs_del_meshif(struct net_device *dev)
 +{
 +	struct bat_priv *bat_priv = netdev_priv(dev);
 +	struct bat_attribute **bat_attr;
 +
 +	for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
 +		sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
 +
 +	kobject_put(bat_priv->mesh_obj);
 +	bat_priv->mesh_obj = NULL;
 +}
 +
 +static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
 +			       char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
 +
 +	if (!batman_if)
 +		return 0;
 +
 +	return sprintf(buff, "%s\n",
 +		       batman_if->if_status == IF_NOT_IN_USE ?
- 							"none" : "bat0");
++					"none" : batman_if->soft_iface->name);
 +}
 +
 +static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 +				char *buff, size_t count)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
 +	int status_tmp = -1;
 +
 +	if (!batman_if)
 +		return count;
 +
- 	if (strncmp(buff, "none", 4) == 0)
- 		status_tmp = IF_NOT_IN_USE;
- 
- 	if (strncmp(buff, "bat0", 4) == 0)
- 		status_tmp = IF_I_WANT_YOU;
- 
- 	if (status_tmp < 0) {
- 		if (buff[count - 1] == '\n')
- 			buff[count - 1] = '\0';
++	if (buff[count - 1] == '\n')
++		buff[count - 1] = '\0';
 +
++	if (strlen(buff) >= IFNAMSIZ) {
 +		pr_err("Invalid parameter for 'mesh_iface' setting received: "
- 		       "%s\n", buff);
++		       "interface name too long '%s'\n", buff);
 +		return -EINVAL;
 +	}
 +
- 	if ((batman_if->if_status == status_tmp) ||
- 	    ((status_tmp == IF_I_WANT_YOU) &&
- 	     (batman_if->if_status != IF_NOT_IN_USE)))
++	if (strncmp(buff, "none", 4) == 0)
++		status_tmp = IF_NOT_IN_USE;
++	else
++		status_tmp = IF_I_WANT_YOU;
++
++	if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
++	    (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0)))
 +		return count;
 +
- 	if (status_tmp == IF_I_WANT_YOU)
- 		status_tmp = hardif_enable_interface(batman_if);
- 	else
++	if (status_tmp == IF_NOT_IN_USE) {
++		rtnl_lock();
++		hardif_disable_interface(batman_if);
++		rtnl_unlock();
++		return count;
++	}
++
++	/* if the interface already is in use */
++	if (batman_if->if_status != IF_NOT_IN_USE) {
++		rtnl_lock();
 +		hardif_disable_interface(batman_if);
++		rtnl_unlock();
++	}
 +
- 	return (status_tmp < 0 ? status_tmp : count);
++	return hardif_enable_interface(batman_if, buff);
 +}
 +
 +static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 +				 char *buff)
 +{
 +	struct device *dev = to_dev(kobj->parent);
 +	struct net_device *net_dev = to_net_dev(dev);
 +	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
 +
 +	if (!batman_if)
 +		return 0;
 +
 +	switch (batman_if->if_status) {
 +	case IF_TO_BE_REMOVED:
 +		return sprintf(buff, "disabling\n");
 +	case IF_INACTIVE:
 +		return sprintf(buff, "inactive\n");
 +	case IF_ACTIVE:
 +		return sprintf(buff, "active\n");
 +	case IF_TO_BE_ACTIVATED:
 +		return sprintf(buff, "enabling\n");
 +	case IF_NOT_IN_USE:
 +	default:
 +		return sprintf(buff, "not in use\n");
 +	}
 +}
 +
 +static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
 +		show_mesh_iface, store_mesh_iface);
 +static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
 +
 +static struct bat_attribute *batman_attrs[] = {
 +	&bat_attr_mesh_iface,
 +	&bat_attr_iface_status,
 +	NULL,
 +};
 +
 +int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
 +{
 +	struct kobject *hardif_kobject = &dev->dev.kobj;
 +	struct bat_attribute **bat_attr;
 +	int err;
 +
 +	*hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
 +						    hardif_kobject);
 +
 +	if (!*hardif_obj) {
 +		bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
 +			SYSFS_IF_BAT_SUBDIR);
 +		goto out;
 +	}
 +
 +	for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
 +		err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
 +		if (err) {
 +			bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
 +				dev->name, SYSFS_IF_BAT_SUBDIR,
 +				((*bat_attr)->attr).name);
 +			goto rem_attr;
 +		}
 +	}
 +
 +	return 0;
 +
 +rem_attr:
 +	for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
 +		sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
 +out:
 +	return -ENOMEM;
 +}
 +
 +void sysfs_del_hardif(struct kobject **hardif_obj)
 +{
 +	kobject_put(*hardif_obj);
 +	*hardif_obj = NULL;
 +}
diff --combined drivers/staging/batman-adv/bitarray.c
index dd4193c,814274f..814274f
--- a/drivers/staging/batman-adv/bitarray.c
+++ b/drivers/staging/batman-adv/bitarray.c
@@@ -22,6 -22,8 +22,8 @@@
  #include "main.h"
  #include "bitarray.h"
  
+ #include <linux/bitops.h>
+ 
  /* returns true if the corresponding bit in the given seq_bits indicates true
   * and curr_seqno is within range of last_seqno */
  uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
@@@ -125,11 -127,10 +127,10 @@@ static void bit_reset_window(TYPE_OF_WO
   *  1 if the window was moved (either new or very old)
   *  0 if the window was not moved/shifted.
   */
- char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
- 		    int8_t set_mark)
+ char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
+ 		    int32_t seq_num_diff, int8_t set_mark)
  {
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
+ 	struct bat_priv *bat_priv = (struct bat_priv *)priv;
  
  	/* sequence number is slightly older. We already got a sequence number
  	 * higher than this one, so we just mark it. */
@@@ -187,21 -188,14 +188,14 @@@
  }
  
  /* count the hamming weight, how many good packets did we receive? just count
-  * the 1's. The inner loop uses the Kernighan algorithm, see
-  * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan
+  * the 1's.
   */
  int bit_packet_count(TYPE_OF_WORD *seq_bits)
  {
  	int i, hamming = 0;
- 	TYPE_OF_WORD word;
  
- 	for (i = 0; i < NUM_WORDS; i++) {
- 		word = seq_bits[i];
+ 	for (i = 0; i < NUM_WORDS; i++)
+ 		hamming += hweight_long(seq_bits[i]);
  
- 		while (word) {
- 			word &= word-1;
- 			hamming++;
- 		}
- 	}
  	return hamming;
  }
diff --combined drivers/staging/batman-adv/bitarray.h
index 01897d6,d961d56..d961d56
--- a/drivers/staging/batman-adv/bitarray.h
+++ b/drivers/staging/batman-adv/bitarray.h
@@@ -22,7 -22,8 +22,8 @@@
  #ifndef _NET_BATMAN_ADV_BITARRAY_H_
  #define _NET_BATMAN_ADV_BITARRAY_H_
  
- /* you should choose something big, if you don't want to waste cpu */
+ /* you should choose something big, if you don't want to waste cpu
+    and keep the type in sync with bit_packet_count */
  #define TYPE_OF_WORD unsigned long
  #define WORD_BIT_SIZE (sizeof(TYPE_OF_WORD) * 8)
  
@@@ -37,8 -38,8 +38,8 @@@ void bit_mark(TYPE_OF_WORD *seq_bits, i
  
  /* receive and process one packet, returns 1 if received seq_num is considered
   * new, 0 if old  */
- char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
- 					int8_t set_mark);
+ char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
+ 		    int32_t seq_num_diff, int8_t set_mark);
  
  /* count the hamming weight, how many good packets did we receive? */
  int  bit_packet_count(TYPE_OF_WORD *seq_bits);
diff --combined drivers/staging/batman-adv/hard-interface.c
index baa8b05,0000000..a587da9
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@@ -1,541 -1,0 +1,596 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "hard-interface.h"
 +#include "soft-interface.h"
 +#include "send.h"
 +#include "translation-table.h"
 +#include "routing.h"
 +#include "bat_sysfs.h"
 +#include "originator.h"
 +#include "hash.h"
 +
 +#include <linux/if_arp.h>
- #include <linux/netfilter_bridge.h>
 +
 +#define MIN(x, y) ((x) < (y) ? (x) : (y))
 +
 +struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
 +{
 +	struct batman_if *batman_if;
 +
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
 +		if (batman_if->net_dev == net_dev)
 +			goto out;
 +	}
 +
 +	batman_if = NULL;
 +
 +out:
 +	rcu_read_unlock();
 +	return batman_if;
 +}
 +
 +static int is_valid_iface(struct net_device *net_dev)
 +{
 +	if (net_dev->flags & IFF_LOOPBACK)
 +		return 0;
 +
 +	if (net_dev->type != ARPHRD_ETHER)
 +		return 0;
 +
 +	if (net_dev->addr_len != ETH_ALEN)
 +		return 0;
 +
 +	/* no batman over batman */
 +#ifdef HAVE_NET_DEVICE_OPS
 +	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
 +		return 0;
 +#else
 +	if (net_dev->hard_start_xmit == interface_tx)
 +		return 0;
 +#endif
 +
 +	/* Device is being bridged */
 +	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
 +		return 0; */
 +
 +	return 1;
 +}
 +
- static struct batman_if *get_active_batman_if(void)
++static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
 +{
 +	struct batman_if *batman_if;
 +
- 	/* TODO: should check interfaces belonging to bat_priv */
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
++		if (batman_if->soft_iface != soft_iface)
++			continue;
++
 +		if (batman_if->if_status == IF_ACTIVE)
 +			goto out;
 +	}
 +
 +	batman_if = NULL;
 +
 +out:
 +	rcu_read_unlock();
 +	return batman_if;
 +}
 +
 +static void set_primary_if(struct bat_priv *bat_priv,
 +			   struct batman_if *batman_if)
 +{
 +	struct batman_packet *batman_packet;
++	struct vis_packet *vis_packet;
 +
 +	bat_priv->primary_if = batman_if;
 +
 +	if (!bat_priv->primary_if)
 +		return;
 +
- 	set_main_if_addr(batman_if->net_dev->dev_addr);
- 
 +	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
 +	batman_packet->flags = PRIMARIES_FIRST_HOP;
 +	batman_packet->ttl = TTL;
 +
++	vis_packet = (struct vis_packet *)
++				bat_priv->my_vis_info->skb_packet->data;
++	memcpy(vis_packet->vis_orig,
++	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
++	memcpy(vis_packet->sender_orig,
++	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
++
 +	/***
 +	 * hacky trick to make sure that we send the HNA information via
 +	 * our new primary interface
 +	 */
- 	atomic_set(&hna_local_changed, 1);
++	atomic_set(&bat_priv->hna_local_changed, 1);
 +}
 +
 +static bool hardif_is_iface_up(struct batman_if *batman_if)
 +{
 +	if (batman_if->net_dev->flags & IFF_UP)
 +		return true;
 +
 +	return false;
 +}
 +
 +static void update_mac_addresses(struct batman_if *batman_if)
 +{
- 	if (!batman_if || !batman_if->packet_buff)
- 		return;
- 
 +	addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
 +
 +	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
 +	       batman_if->net_dev->dev_addr, ETH_ALEN);
 +	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
 +	       batman_if->net_dev->dev_addr, ETH_ALEN);
 +}
 +
 +static void check_known_mac_addr(uint8_t *addr)
 +{
 +	struct batman_if *batman_if;
 +
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
 +		if ((batman_if->if_status != IF_ACTIVE) &&
 +		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
 +			continue;
 +
 +		if (!compare_orig(batman_if->net_dev->dev_addr, addr))
 +			continue;
 +
 +		pr_warning("The newly added mac address (%pM) already exists "
- 			   "on: %s\n", addr, batman_if->dev);
++			   "on: %s\n", addr, batman_if->net_dev->name);
 +		pr_warning("It is strongly recommended to keep mac addresses "
 +			   "unique to avoid problems!\n");
 +	}
 +	rcu_read_unlock();
 +}
 +
- int hardif_min_mtu(void)
++int hardif_min_mtu(struct net_device *soft_iface)
 +{
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 +	struct batman_if *batman_if;
 +	/* allow big frames if all devices are capable to do so
 +	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 +	int min_mtu = ETH_DATA_LEN;
 +
++	if (atomic_read(&bat_priv->frag_enabled))
++		goto out;
++
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
- 		if ((batman_if->if_status == IF_ACTIVE) ||
- 		    (batman_if->if_status == IF_TO_BE_ACTIVATED))
- 			min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
- 				      min_mtu);
++		if ((batman_if->if_status != IF_ACTIVE) &&
++		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
++			continue;
++
++		if (batman_if->soft_iface != soft_iface)
++			continue;
++
++		min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
++			      min_mtu);
 +	}
 +	rcu_read_unlock();
- 
++out:
 +	return min_mtu;
 +}
 +
 +/* adjusts the MTU if a new interface with a smaller MTU appeared. */
- void update_min_mtu(void)
++void update_min_mtu(struct net_device *soft_iface)
 +{
 +	int min_mtu;
 +
- 	min_mtu = hardif_min_mtu();
- 	if (soft_device->mtu != min_mtu)
- 		soft_device->mtu = min_mtu;
++	min_mtu = hardif_min_mtu(soft_iface);
++	if (soft_iface->mtu != min_mtu)
++		soft_iface->mtu = min_mtu;
 +}
 +
- static void hardif_activate_interface(struct net_device *net_dev,
- 				      struct bat_priv *bat_priv,
- 				      struct batman_if *batman_if)
++static void hardif_activate_interface(struct batman_if *batman_if)
 +{
++	struct bat_priv *bat_priv;
++
 +	if (batman_if->if_status != IF_INACTIVE)
 +		return;
 +
++	bat_priv = netdev_priv(batman_if->soft_iface);
++
 +	update_mac_addresses(batman_if);
 +	batman_if->if_status = IF_TO_BE_ACTIVATED;
 +
 +	/**
 +	 * the first active interface becomes our primary interface or
 +	 * the next active interface after the old primary interface was removed
 +	 */
 +	if (!bat_priv->primary_if)
 +		set_primary_if(bat_priv, batman_if);
 +
- 	bat_info(net_dev, "Interface activated: %s\n", batman_if->dev);
++	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
++		 batman_if->net_dev->name);
 +
- 	if (atomic_read(&module_state) == MODULE_INACTIVE)
- 		activate_module();
- 
- 	update_min_mtu();
++	update_min_mtu(batman_if->soft_iface);
 +	return;
 +}
 +
- static void hardif_deactivate_interface(struct net_device *net_dev,
- 					struct batman_if *batman_if)
++static void hardif_deactivate_interface(struct batman_if *batman_if)
 +{
 +	if ((batman_if->if_status != IF_ACTIVE) &&
 +	   (batman_if->if_status != IF_TO_BE_ACTIVATED))
 +		return;
 +
 +	batman_if->if_status = IF_INACTIVE;
 +
- 	bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
++	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
++		 batman_if->net_dev->name);
 +
- 	update_min_mtu();
++	update_min_mtu(batman_if->soft_iface);
 +}
 +
- int hardif_enable_interface(struct batman_if *batman_if)
++int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv;
 +	struct batman_packet *batman_packet;
 +
 +	if (batman_if->if_status != IF_NOT_IN_USE)
 +		goto out;
 +
++	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
++
++	if (!batman_if->soft_iface) {
++		batman_if->soft_iface = softif_create(iface_name);
++
++		if (!batman_if->soft_iface)
++			goto err;
++
++		/* dev_get_by_name() increases the reference counter for us */
++		dev_hold(batman_if->soft_iface);
++	}
++
++	bat_priv = netdev_priv(batman_if->soft_iface);
 +	batman_if->packet_len = BAT_PACKET_LEN;
 +	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
 +
 +	if (!batman_if->packet_buff) {
- 		bat_err(soft_device, "Can't add interface packet (%s): "
- 			"out of memory\n", batman_if->dev);
++		bat_err(batman_if->soft_iface, "Can't add interface packet "
++			"(%s): out of memory\n", batman_if->net_dev->name);
 +		goto err;
 +	}
 +
 +	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
 +	batman_packet->packet_type = BAT_PACKET;
 +	batman_packet->version = COMPAT_VERSION;
 +	batman_packet->flags = 0;
 +	batman_packet->ttl = 2;
 +	batman_packet->tq = TQ_MAX_VALUE;
 +	batman_packet->num_hna = 0;
 +
 +	batman_if->if_num = bat_priv->num_ifaces;
 +	bat_priv->num_ifaces++;
 +	batman_if->if_status = IF_INACTIVE;
 +	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
 +
++	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
++	batman_if->batman_adv_ptype.func = batman_skb_recv;
++	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
++	dev_add_pack(&batman_if->batman_adv_ptype);
++
 +	atomic_set(&batman_if->seqno, 1);
- 	bat_info(soft_device, "Adding interface: %s\n", batman_if->dev);
++	atomic_set(&batman_if->frag_seqno, 1);
++	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
++		 batman_if->net_dev->name);
++
++	if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
++		ETH_DATA_LEN + BAT_HEADER_LEN)
++		bat_info(batman_if->soft_iface,
++			"The MTU of interface %s is too small (%i) to handle "
++			"the transport of batman-adv packets. Packets going "
++			"over this interface will be fragmented on layer2 "
++			"which could impact the performance. Setting the MTU "
++			"to %zi would solve the problem.\n",
++			batman_if->net_dev->name, batman_if->net_dev->mtu,
++			ETH_DATA_LEN + BAT_HEADER_LEN);
++
++	if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
++		ETH_DATA_LEN + BAT_HEADER_LEN)
++		bat_info(batman_if->soft_iface,
++			"The MTU of interface %s is too small (%i) to handle "
++			"the transport of batman-adv packets. If you experience"
++			" problems getting traffic through try increasing the "
++			"MTU to %zi.\n",
++			batman_if->net_dev->name, batman_if->net_dev->mtu,
++			ETH_DATA_LEN + BAT_HEADER_LEN);
 +
 +	if (hardif_is_iface_up(batman_if))
- 		hardif_activate_interface(soft_device, bat_priv, batman_if);
++		hardif_activate_interface(batman_if);
 +	else
- 		bat_err(soft_device, "Not using interface %s "
++		bat_err(batman_if->soft_iface, "Not using interface %s "
 +			"(retrying later): interface not active\n",
- 			batman_if->dev);
++			batman_if->net_dev->name);
 +
 +	/* begin scheduling originator messages on that interface */
 +	schedule_own_packet(batman_if);
 +
 +out:
 +	return 0;
 +
 +err:
 +	return -ENOMEM;
 +}
 +
 +void hardif_disable_interface(struct batman_if *batman_if)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +
 +	if (batman_if->if_status == IF_ACTIVE)
- 		hardif_deactivate_interface(soft_device, batman_if);
++		hardif_deactivate_interface(batman_if);
 +
 +	if (batman_if->if_status != IF_INACTIVE)
 +		return;
 +
- 	bat_info(soft_device, "Removing interface: %s\n", batman_if->dev);
++	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
++		 batman_if->net_dev->name);
++	dev_remove_pack(&batman_if->batman_adv_ptype);
++
 +	bat_priv->num_ifaces--;
 +	orig_hash_del_if(batman_if, bat_priv->num_ifaces);
 +
 +	if (batman_if == bat_priv->primary_if)
- 		set_primary_if(bat_priv, get_active_batman_if());
++		set_primary_if(bat_priv,
++			       get_active_batman_if(batman_if->soft_iface));
 +
 +	kfree(batman_if->packet_buff);
 +	batman_if->packet_buff = NULL;
 +	batman_if->if_status = IF_NOT_IN_USE;
 +
- 	if ((atomic_read(&module_state) == MODULE_ACTIVE) &&
- 	    (bat_priv->num_ifaces == 0))
- 		deactivate_module();
++	/* delete all references to this batman_if */
++	purge_orig_ref(bat_priv);
++	purge_outstanding_packets(bat_priv, batman_if);
++	dev_put(batman_if->soft_iface);
++
++	/* nobody uses this interface anymore */
++	if (!bat_priv->num_ifaces)
++		softif_destroy(batman_if->soft_iface);
++
++	batman_if->soft_iface = NULL;
 +}
 +
 +static struct batman_if *hardif_add_interface(struct net_device *net_dev)
 +{
 +	struct batman_if *batman_if;
 +	int ret;
 +
 +	ret = is_valid_iface(net_dev);
 +	if (ret != 1)
 +		goto out;
 +
 +	dev_hold(net_dev);
 +
 +	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
 +	if (!batman_if) {
 +		pr_err("Can't add interface (%s): out of memory\n",
 +		       net_dev->name);
 +		goto release_dev;
 +	}
 +
- 	batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
- 	if (!batman_if->dev)
- 		goto free_if;
- 
 +	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
 +	if (ret)
- 		goto free_dev;
++		goto free_if;
 +
 +	batman_if->if_num = -1;
 +	batman_if->net_dev = net_dev;
++	batman_if->soft_iface = NULL;
 +	batman_if->if_status = IF_NOT_IN_USE;
- 	batman_if->packet_buff = NULL;
 +	INIT_LIST_HEAD(&batman_if->list);
 +
 +	check_known_mac_addr(batman_if->net_dev->dev_addr);
 +	list_add_tail_rcu(&batman_if->list, &if_list);
 +	return batman_if;
 +
- free_dev:
- 	kfree(batman_if->dev);
 +free_if:
 +	kfree(batman_if);
 +release_dev:
 +	dev_put(net_dev);
 +out:
 +	return NULL;
 +}
 +
 +static void hardif_free_interface(struct rcu_head *rcu)
 +{
 +	struct batman_if *batman_if = container_of(rcu, struct batman_if, rcu);
 +
- 	/* delete all references to this batman_if */
- 	purge_orig(NULL);
- 	purge_outstanding_packets(batman_if);
- 
- 	kfree(batman_if->dev);
 +	kfree(batman_if);
 +}
 +
 +static void hardif_remove_interface(struct batman_if *batman_if)
 +{
 +	/* first deactivate interface */
 +	if (batman_if->if_status != IF_NOT_IN_USE)
 +		hardif_disable_interface(batman_if);
 +
 +	if (batman_if->if_status != IF_NOT_IN_USE)
 +		return;
 +
 +	batman_if->if_status = IF_TO_BE_REMOVED;
 +	list_del_rcu(&batman_if->list);
 +	sysfs_del_hardif(&batman_if->hardif_obj);
 +	dev_put(batman_if->net_dev);
 +	call_rcu(&batman_if->rcu, hardif_free_interface);
 +}
 +
 +void hardif_remove_interfaces(void)
 +{
 +	struct batman_if *batman_if, *batman_if_tmp;
 +
- 	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list)
++	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
++		rtnl_lock();
 +		hardif_remove_interface(batman_if);
++		rtnl_unlock();
++	}
 +}
 +
 +static int hard_if_event(struct notifier_block *this,
 +			 unsigned long event, void *ptr)
 +{
 +	struct net_device *net_dev = (struct net_device *)ptr;
 +	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv;
 +
 +	if (!batman_if && event == NETDEV_REGISTER)
 +			batman_if = hardif_add_interface(net_dev);
 +
 +	if (!batman_if)
 +		goto out;
 +
 +	switch (event) {
 +	case NETDEV_UP:
- 		hardif_activate_interface(soft_device, bat_priv, batman_if);
++		hardif_activate_interface(batman_if);
 +		break;
 +	case NETDEV_GOING_DOWN:
 +	case NETDEV_DOWN:
- 		hardif_deactivate_interface(soft_device, batman_if);
++		hardif_deactivate_interface(batman_if);
 +		break;
 +	case NETDEV_UNREGISTER:
 +		hardif_remove_interface(batman_if);
 +		break;
- 	case NETDEV_CHANGENAME:
++	case NETDEV_CHANGEMTU:
++		if (batman_if->soft_iface)
++			update_min_mtu(batman_if->soft_iface);
 +		break;
 +	case NETDEV_CHANGEADDR:
++		if (batman_if->if_status == IF_NOT_IN_USE)
++			goto out;
++
 +		check_known_mac_addr(batman_if->net_dev->dev_addr);
 +		update_mac_addresses(batman_if);
++
++		bat_priv = netdev_priv(batman_if->soft_iface);
 +		if (batman_if == bat_priv->primary_if)
 +			set_primary_if(bat_priv, batman_if);
 +		break;
 +	default:
 +		break;
 +	};
 +
 +out:
 +	return NOTIFY_DONE;
 +}
 +
- static int batman_skb_recv_finish(struct sk_buff *skb)
- {
- 	return NF_ACCEPT;
- }
- 
 +/* receive a packet with the batman ethertype coming on a hard
 + * interface */
 +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 +	struct packet_type *ptype, struct net_device *orig_dev)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv;
 +	struct batman_packet *batman_packet;
 +	struct batman_if *batman_if;
 +	int ret;
 +
++	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
 +	skb = skb_share_check(skb, GFP_ATOMIC);
 +
 +	/* skb was released by skb_share_check() */
 +	if (!skb)
 +		goto err_out;
 +
- 	if (atomic_read(&module_state) != MODULE_ACTIVE)
- 		goto err_free;
- 
- 	/* if netfilter/ebtables wants to block incoming batman
- 	 * packets then give them a chance to do so here */
- 	ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
- 		      batman_skb_recv_finish);
- 	if (ret != 1)
- 		goto err_out;
- 
 +	/* packet should hold at least type and version */
- 	if (unlikely(skb_headlen(skb) < 2))
++	if (unlikely(!pskb_may_pull(skb, 2)))
 +		goto err_free;
 +
 +	/* expect a valid ethernet header here. */
 +	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
 +				|| !skb_mac_header(skb)))
 +		goto err_free;
 +
- 	batman_if = get_batman_if_by_netdev(skb->dev);
- 	if (!batman_if)
++	if (!batman_if->soft_iface)
++		goto err_free;
++
++	bat_priv = netdev_priv(batman_if->soft_iface);
++
++	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 +		goto err_free;
 +
 +	/* discard frames on not active interfaces */
 +	if (batman_if->if_status != IF_ACTIVE)
 +		goto err_free;
 +
 +	batman_packet = (struct batman_packet *)skb->data;
 +
 +	if (batman_packet->version != COMPAT_VERSION) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: incompatible batman version (%i)\n",
 +			batman_packet->version);
 +		goto err_free;
 +	}
 +
 +	/* all receive handlers return whether they received or reused
 +	 * the supplied skb. if not, we have to free the skb. */
 +
 +	switch (batman_packet->packet_type) {
 +		/* batman originator packet */
 +	case BAT_PACKET:
 +		ret = recv_bat_packet(skb, batman_if);
 +		break;
 +
 +		/* batman icmp packet */
 +	case BAT_ICMP:
- 		ret = recv_icmp_packet(skb);
++		ret = recv_icmp_packet(skb, batman_if);
 +		break;
 +
 +		/* unicast packet */
 +	case BAT_UNICAST:
 +		ret = recv_unicast_packet(skb, batman_if);
 +		break;
 +
++		/* fragmented unicast packet */
++	case BAT_UNICAST_FRAG:
++		ret = recv_ucast_frag_packet(skb, batman_if);
++		break;
++
 +		/* broadcast packet */
 +	case BAT_BCAST:
- 		ret = recv_bcast_packet(skb);
++		ret = recv_bcast_packet(skb, batman_if);
 +		break;
 +
 +		/* vis packet */
 +	case BAT_VIS:
- 		ret = recv_vis_packet(skb);
++		ret = recv_vis_packet(skb, batman_if);
 +		break;
 +	default:
 +		ret = NET_RX_DROP;
 +	}
 +
 +	if (ret == NET_RX_DROP)
 +		kfree_skb(skb);
 +
 +	/* return NET_RX_SUCCESS in any case as we
 +	 * most probably dropped the packet for
 +	 * routing-logical reasons. */
 +
 +	return NET_RX_SUCCESS;
 +
 +err_free:
 +	kfree_skb(skb);
 +err_out:
 +	return NET_RX_DROP;
 +}
 +
 +struct notifier_block hard_if_notifier = {
 +	.notifier_call = hard_if_event,
 +};
diff --combined drivers/staging/batman-adv/hard-interface.h
index d5640b0,4b49527..4b49527
--- a/drivers/staging/batman-adv/hard-interface.h
+++ b/drivers/staging/batman-adv/hard-interface.h
@@@ -32,14 -32,14 +32,14 @@@
  extern struct notifier_block hard_if_notifier;
  
  struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
- int hardif_enable_interface(struct batman_if *batman_if);
+ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
  void hardif_disable_interface(struct batman_if *batman_if);
  void hardif_remove_interfaces(void);
  int batman_skb_recv(struct sk_buff *skb,
  				struct net_device *dev,
  				struct packet_type *ptype,
  				struct net_device *orig_dev);
- int hardif_min_mtu(void);
- void update_min_mtu(void);
+ int hardif_min_mtu(struct net_device *soft_iface);
+ void update_min_mtu(struct net_device *soft_iface);
  
  #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
diff --combined drivers/staging/batman-adv/hash.c
index 1286f8f,8ef26eb..8ef26eb
--- a/drivers/staging/batman-adv/hash.c
+++ b/drivers/staging/batman-adv/hash.c
@@@ -36,7 -36,7 +36,7 @@@ static void hash_init(struct hashtable_
  /* remove the hash structure. if hashdata_free_cb != NULL, this function will be
   * called to remove the elements inside of the hash.  if you don't remove the
   * elements, memory might be leaked. */
- void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb)
+ void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg)
  {
  	struct element_t *bucket, *last_bucket;
  	int i;
@@@ -46,7 -46,7 +46,7 @@@
  
  		while (bucket != NULL) {
  			if (free_cb != NULL)
- 				free_cb(bucket->data);
+ 				free_cb(bucket->data, arg);
  
  			last_bucket = bucket;
  			bucket = bucket->next;
@@@ -300,7 -300,7 +300,7 @@@ struct hashtable_t *hash_resize(struct 
  
  	/* remove hash and eventual overflow buckets but not the content
  	 * itself. */
- 	hash_delete(hash, NULL);
+ 	hash_delete(hash, NULL, NULL);
  
  	return new_hash;
  }
diff --combined drivers/staging/batman-adv/hash.h
index c483e11,2c8e176..2c8e176
--- a/drivers/staging/batman-adv/hash.h
+++ b/drivers/staging/batman-adv/hash.h
@@@ -30,7 -30,7 +30,7 @@@
  
  typedef int (*hashdata_compare_cb)(void *, void *);
  typedef int (*hashdata_choose_cb)(void *, int);
- typedef void (*hashdata_free_cb)(void *);
+ typedef void (*hashdata_free_cb)(void *, void *);
  
  struct element_t {
  	void *data;		/* pointer to the data */
@@@ -70,7 -70,7 +70,7 @@@ void *hash_remove_bucket(struct hashtab
  /* remove the hash structure. if hashdata_free_cb != NULL, this function will be
   * called to remove the elements inside of the hash.  if you don't remove the
   * elements, memory might be leaked. */
- void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb);
+ void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg);
  
  /* free only the hashtable and the hash itself. */
  void hash_destroy(struct hashtable_t *hash);
diff --combined drivers/staging/batman-adv/icmp_socket.c
index 3ae7dd2,0000000..24627be
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@@ -1,338 -1,0 +1,356 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include <linux/debugfs.h>
 +#include <linux/slab.h>
 +#include "icmp_socket.h"
 +#include "send.h"
 +#include "types.h"
 +#include "hash.h"
 +#include "hard-interface.h"
 +
 +
 +static struct socket_client *socket_client_hash[256];
 +
 +static void bat_socket_add_packet(struct socket_client *socket_client,
 +				  struct icmp_packet_rr *icmp_packet,
 +				  size_t icmp_len);
 +
 +void bat_socket_init(void)
 +{
 +	memset(socket_client_hash, 0, sizeof(socket_client_hash));
 +}
 +
 +static int bat_socket_open(struct inode *inode, struct file *file)
 +{
 +	unsigned int i;
 +	struct socket_client *socket_client;
 +
 +	socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
 +
 +	if (!socket_client)
 +		return -ENOMEM;
 +
 +	for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) {
 +		if (!socket_client_hash[i]) {
 +			socket_client_hash[i] = socket_client;
 +			break;
 +		}
 +	}
 +
 +	if (i == ARRAY_SIZE(socket_client_hash)) {
 +		pr_err("Error - can't add another packet client: "
 +		       "maximum number of clients reached\n");
 +		kfree(socket_client);
 +		return -EXFULL;
 +	}
 +
 +	INIT_LIST_HEAD(&socket_client->queue_list);
 +	socket_client->queue_len = 0;
 +	socket_client->index = i;
 +	socket_client->bat_priv = inode->i_private;
 +	spin_lock_init(&socket_client->lock);
 +	init_waitqueue_head(&socket_client->queue_wait);
 +
 +	file->private_data = socket_client;
 +
 +	inc_module_count();
 +	return 0;
 +}
 +
 +static int bat_socket_release(struct inode *inode, struct file *file)
 +{
 +	struct socket_client *socket_client = file->private_data;
 +	struct socket_packet *socket_packet;
 +	struct list_head *list_pos, *list_pos_tmp;
 +	unsigned long flags;
 +
 +	spin_lock_irqsave(&socket_client->lock, flags);
 +
 +	/* for all packets in the queue ... */
 +	list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
 +		socket_packet = list_entry(list_pos,
 +					   struct socket_packet, list);
 +
 +		list_del(list_pos);
 +		kfree(socket_packet);
 +	}
 +
 +	socket_client_hash[socket_client->index] = NULL;
 +	spin_unlock_irqrestore(&socket_client->lock, flags);
 +
 +	kfree(socket_client);
 +	dec_module_count();
 +
 +	return 0;
 +}
 +
 +static ssize_t bat_socket_read(struct file *file, char __user *buf,
 +			       size_t count, loff_t *ppos)
 +{
 +	struct socket_client *socket_client = file->private_data;
 +	struct socket_packet *socket_packet;
 +	size_t packet_len;
 +	int error;
 +	unsigned long flags;
 +
 +	if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
 +		return -EAGAIN;
 +
 +	if ((!buf) || (count < sizeof(struct icmp_packet)))
 +		return -EINVAL;
 +
 +	if (!access_ok(VERIFY_WRITE, buf, count))
 +		return -EFAULT;
 +
 +	error = wait_event_interruptible(socket_client->queue_wait,
 +					 socket_client->queue_len);
 +
 +	if (error)
 +		return error;
 +
 +	spin_lock_irqsave(&socket_client->lock, flags);
 +
 +	socket_packet = list_first_entry(&socket_client->queue_list,
 +					 struct socket_packet, list);
 +	list_del(&socket_packet->list);
 +	socket_client->queue_len--;
 +
 +	spin_unlock_irqrestore(&socket_client->lock, flags);
 +
 +	error = __copy_to_user(buf, &socket_packet->icmp_packet,
 +			       socket_packet->icmp_len);
 +
 +	packet_len = socket_packet->icmp_len;
 +	kfree(socket_packet);
 +
 +	if (error)
 +		return -EFAULT;
 +
 +	return packet_len;
 +}
 +
 +static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 +				size_t len, loff_t *off)
 +{
 +	struct socket_client *socket_client = file->private_data;
 +	struct bat_priv *bat_priv = socket_client->bat_priv;
- 	struct icmp_packet_rr icmp_packet;
++	struct sk_buff *skb;
++	struct icmp_packet_rr *icmp_packet;
++
 +	struct orig_node *orig_node;
 +	struct batman_if *batman_if;
 +	size_t packet_len = sizeof(struct icmp_packet);
 +	uint8_t dstaddr[ETH_ALEN];
 +	unsigned long flags;
 +
 +	if (len < sizeof(struct icmp_packet)) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Error - can't send packet from char device: "
 +			"invalid packet size\n");
 +		return -EINVAL;
 +	}
 +
 +	if (!bat_priv->primary_if)
 +		return -EFAULT;
 +
 +	if (len >= sizeof(struct icmp_packet_rr))
 +		packet_len = sizeof(struct icmp_packet_rr);
 +
- 	if (!access_ok(VERIFY_READ, buff, packet_len))
- 		return -EFAULT;
++	skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
++	if (!skb)
++		return -ENOMEM;
 +
- 	if (__copy_from_user(&icmp_packet, buff, packet_len))
- 		return -EFAULT;
++	skb_reserve(skb, sizeof(struct ethhdr));
++	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
++
++	if (!access_ok(VERIFY_READ, buff, packet_len)) {
++		len = -EFAULT;
++		goto free_skb;
++	}
 +
- 	if (icmp_packet.packet_type != BAT_ICMP) {
++	if (__copy_from_user(icmp_packet, buff, packet_len)) {
++		len = -EFAULT;
++		goto free_skb;
++	}
++
++	if (icmp_packet->packet_type != BAT_ICMP) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Error - can't send packet from char device: "
 +			"got bogus packet type (expected: BAT_ICMP)\n");
- 		return -EINVAL;
++		len = -EINVAL;
++		goto free_skb;
 +	}
 +
- 	if (icmp_packet.msg_type != ECHO_REQUEST) {
++	if (icmp_packet->msg_type != ECHO_REQUEST) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Error - can't send packet from char device: "
 +			"got bogus message type (expected: ECHO_REQUEST)\n");
- 		return -EINVAL;
++		len = -EINVAL;
++		goto free_skb;
 +	}
 +
- 	icmp_packet.uid = socket_client->index;
++	icmp_packet->uid = socket_client->index;
 +
- 	if (icmp_packet.version != COMPAT_VERSION) {
- 		icmp_packet.msg_type = PARAMETER_PROBLEM;
- 		icmp_packet.ttl = COMPAT_VERSION;
- 		bat_socket_add_packet(socket_client, &icmp_packet, packet_len);
- 		goto out;
++	if (icmp_packet->version != COMPAT_VERSION) {
++		icmp_packet->msg_type = PARAMETER_PROBLEM;
++		icmp_packet->ttl = COMPAT_VERSION;
++		bat_socket_add_packet(socket_client, icmp_packet, packet_len);
++		goto free_skb;
 +	}
 +
- 	if (atomic_read(&module_state) != MODULE_ACTIVE)
++	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 +		goto dst_unreach;
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
- 	orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
++						   icmp_packet->dst));
 +
 +	if (!orig_node)
 +		goto unlock;
 +
 +	if (!orig_node->router)
 +		goto unlock;
 +
 +	batman_if = orig_node->router->if_incoming;
 +	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	if (!batman_if)
 +		goto dst_unreach;
 +
 +	if (batman_if->if_status != IF_ACTIVE)
 +		goto dst_unreach;
 +
- 	memcpy(icmp_packet.orig,
++	memcpy(icmp_packet->orig,
 +	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 +
 +	if (packet_len == sizeof(struct icmp_packet_rr))
- 		memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
++		memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
++
 +
- 	send_raw_packet((unsigned char *)&icmp_packet,
- 			packet_len, batman_if, dstaddr);
++	send_skb_packet(skb, batman_if, dstaddr);
 +
 +	goto out;
 +
 +unlock:
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +dst_unreach:
- 	icmp_packet.msg_type = DESTINATION_UNREACHABLE;
- 	bat_socket_add_packet(socket_client, &icmp_packet, packet_len);
++	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
++	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
++free_skb:
++	kfree_skb(skb);
 +out:
 +	return len;
 +}
 +
 +static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
 +{
 +	struct socket_client *socket_client = file->private_data;
 +
 +	poll_wait(file, &socket_client->queue_wait, wait);
 +
 +	if (socket_client->queue_len > 0)
 +		return POLLIN | POLLRDNORM;
 +
 +	return 0;
 +}
 +
 +static const struct file_operations fops = {
 +	.owner = THIS_MODULE,
 +	.open = bat_socket_open,
 +	.release = bat_socket_release,
 +	.read = bat_socket_read,
 +	.write = bat_socket_write,
 +	.poll = bat_socket_poll,
 +};
 +
 +int bat_socket_setup(struct bat_priv *bat_priv)
 +{
 +	struct dentry *d;
 +
 +	if (!bat_priv->debug_dir)
 +		goto err;
 +
 +	d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
 +				bat_priv->debug_dir, bat_priv, &fops);
 +	if (d)
 +		goto err;
 +
 +	return 0;
 +
 +err:
 +	return 1;
 +}
 +
 +static void bat_socket_add_packet(struct socket_client *socket_client,
 +				  struct icmp_packet_rr *icmp_packet,
 +				  size_t icmp_len)
 +{
 +	struct socket_packet *socket_packet;
 +	unsigned long flags;
 +
 +	socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
 +
 +	if (!socket_packet)
 +		return;
 +
 +	INIT_LIST_HEAD(&socket_packet->list);
 +	memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
 +	socket_packet->icmp_len = icmp_len;
 +
 +	spin_lock_irqsave(&socket_client->lock, flags);
 +
 +	/* while waiting for the lock the socket_client could have been
 +	 * deleted */
 +	if (!socket_client_hash[icmp_packet->uid]) {
 +		spin_unlock_irqrestore(&socket_client->lock, flags);
 +		kfree(socket_packet);
 +		return;
 +	}
 +
 +	list_add_tail(&socket_packet->list, &socket_client->queue_list);
 +	socket_client->queue_len++;
 +
 +	if (socket_client->queue_len > 100) {
 +		socket_packet = list_first_entry(&socket_client->queue_list,
 +						 struct socket_packet, list);
 +
 +		list_del(&socket_packet->list);
 +		kfree(socket_packet);
 +		socket_client->queue_len--;
 +	}
 +
 +	spin_unlock_irqrestore(&socket_client->lock, flags);
 +
 +	wake_up(&socket_client->queue_wait);
 +}
 +
 +void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
 +			       size_t icmp_len)
 +{
 +	struct socket_client *hash = socket_client_hash[icmp_packet->uid];
 +
 +	if (hash)
 +		bat_socket_add_packet(hash, icmp_packet, icmp_len);
 +}
diff --combined drivers/staging/batman-adv/main.c
index ef7c20a,0000000..498861f
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@@ -1,291 -1,0 +1,225 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "bat_sysfs.h"
 +#include "bat_debugfs.h"
 +#include "routing.h"
 +#include "send.h"
 +#include "originator.h"
 +#include "soft-interface.h"
 +#include "icmp_socket.h"
 +#include "translation-table.h"
 +#include "hard-interface.h"
 +#include "types.h"
 +#include "vis.h"
 +#include "hash.h"
 +
 +struct list_head if_list;
- struct hlist_head forw_bat_list;
- struct hlist_head forw_bcast_list;
- struct hashtable_t *orig_hash;
- 
- DEFINE_SPINLOCK(orig_hash_lock);
- DEFINE_SPINLOCK(forw_bat_list_lock);
- DEFINE_SPINLOCK(forw_bcast_list_lock);
- 
- atomic_t bcast_queue_left;
- atomic_t batman_queue_left;
- 
- int16_t num_hna;
- 
- struct net_device *soft_device;
 +
 +unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- atomic_t module_state;
- 
- static struct packet_type batman_adv_packet_type __read_mostly = {
- 	.type = __constant_htons(ETH_P_BATMAN),
- 	.func = batman_skb_recv,
- };
 +
 +struct workqueue_struct *bat_event_workqueue;
 +
 +static int __init batman_init(void)
 +{
- 	int retval;
- 
 +	INIT_LIST_HEAD(&if_list);
- 	INIT_HLIST_HEAD(&forw_bat_list);
- 	INIT_HLIST_HEAD(&forw_bcast_list);
- 
- 	atomic_set(&module_state, MODULE_INACTIVE);
- 
- 	atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
- 	atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
 +
 +	/* the name should not be longer than 10 chars - see
 +	 * http://lwn.net/Articles/23634/ */
 +	bat_event_workqueue = create_singlethread_workqueue("bat_events");
 +
 +	if (!bat_event_workqueue)
 +		return -ENOMEM;
 +
 +	bat_socket_init();
 +	debugfs_init();
 +
- 	/* initialize layer 2 interface */
- 	soft_device = alloc_netdev(sizeof(struct bat_priv) , "bat%d",
- 				   interface_setup);
- 
- 	if (!soft_device) {
- 		pr_err("Unable to allocate the batman interface\n");
- 		goto end;
- 	}
- 
- 	retval = register_netdev(soft_device);
- 
- 	if (retval < 0) {
- 		pr_err("Unable to register the batman interface: %i\n", retval);
- 		goto free_soft_device;
- 	}
- 
- 	retval = sysfs_add_meshif(soft_device);
- 
- 	if (retval < 0)
- 		goto unreg_soft_device;
- 
- 	retval = debugfs_add_meshif(soft_device);
- 
- 	if (retval < 0)
- 		goto unreg_sysfs;
- 
 +	register_netdevice_notifier(&hard_if_notifier);
- 	dev_add_pack(&batman_adv_packet_type);
 +
 +	pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) "
 +		"loaded\n", SOURCE_VERSION, REVISION_VERSION_STR,
 +		COMPAT_VERSION);
 +
 +	return 0;
- 
- unreg_sysfs:
- 	sysfs_del_meshif(soft_device);
- unreg_soft_device:
- 	unregister_netdev(soft_device);
- 	soft_device = NULL;
- 	return -ENOMEM;
- 
- free_soft_device:
- 	free_netdev(soft_device);
- 	soft_device = NULL;
- end:
- 	return -ENOMEM;
 +}
 +
 +static void __exit batman_exit(void)
 +{
- 	deactivate_module();
- 
 +	debugfs_destroy();
 +	unregister_netdevice_notifier(&hard_if_notifier);
 +	hardif_remove_interfaces();
 +
- 	if (soft_device) {
- 		debugfs_del_meshif(soft_device);
- 		sysfs_del_meshif(soft_device);
- 		unregister_netdev(soft_device);
- 		soft_device = NULL;
- 	}
- 
- 	dev_remove_pack(&batman_adv_packet_type);
- 
++	flush_workqueue(bat_event_workqueue);
 +	destroy_workqueue(bat_event_workqueue);
 +	bat_event_workqueue = NULL;
 +}
 +
- /* activates the module, starts timer ... */
- void activate_module(void)
++int mesh_init(struct net_device *soft_iface)
 +{
- 	if (originator_init() < 1)
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
++
++	spin_lock_init(&bat_priv->orig_hash_lock);
++	spin_lock_init(&bat_priv->forw_bat_list_lock);
++	spin_lock_init(&bat_priv->forw_bcast_list_lock);
++	spin_lock_init(&bat_priv->hna_lhash_lock);
++	spin_lock_init(&bat_priv->hna_ghash_lock);
++	spin_lock_init(&bat_priv->gw_list_lock);
++	spin_lock_init(&bat_priv->vis_hash_lock);
++	spin_lock_init(&bat_priv->vis_list_lock);
++
++	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
++	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
++	INIT_HLIST_HEAD(&bat_priv->gw_list);
++
++	if (originator_init(bat_priv) < 1)
 +		goto err;
 +
- 	if (hna_local_init() < 1)
++	if (hna_local_init(bat_priv) < 1)
 +		goto err;
 +
- 	if (hna_global_init() < 1)
++	if (hna_global_init(bat_priv) < 1)
 +		goto err;
 +
- 	hna_local_add(soft_device->dev_addr);
++	hna_local_add(soft_iface, soft_iface->dev_addr);
 +
- 	if (vis_init() < 1)
++	if (vis_init(bat_priv) < 1)
 +		goto err;
 +
- 	update_min_mtu();
- 	atomic_set(&module_state, MODULE_ACTIVE);
++	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 +	goto end;
 +
 +err:
 +	pr_err("Unable to allocate memory for mesh information structures: "
 +	       "out of mem ?\n");
- 	deactivate_module();
++	mesh_free(soft_iface);
++	return -1;
++
 +end:
- 	return;
++	return 0;
 +}
 +
- /* shuts down the whole module.*/
- void deactivate_module(void)
++void mesh_free(struct net_device *soft_iface)
 +{
- 	atomic_set(&module_state, MODULE_DEACTIVATING);
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 +
- 	purge_outstanding_packets(NULL);
- 	flush_workqueue(bat_event_workqueue);
++	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
 +
- 	vis_quit();
++	purge_outstanding_packets(bat_priv, NULL);
 +
- 	/* TODO: unregister BATMAN pack */
++	vis_quit(bat_priv);
 +
- 	originator_free();
++	originator_free(bat_priv);
 +
- 	hna_local_free();
- 	hna_global_free();
++	hna_local_free(bat_priv);
++	hna_global_free(bat_priv);
 +
 +	synchronize_net();
 +
 +	synchronize_rcu();
- 	atomic_set(&module_state, MODULE_INACTIVE);
++	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 +}
 +
 +void inc_module_count(void)
 +{
 +	try_module_get(THIS_MODULE);
 +}
 +
 +void dec_module_count(void)
 +{
 +	module_put(THIS_MODULE);
 +}
 +
 +int addr_to_string(char *buff, uint8_t *addr)
 +{
 +	return sprintf(buff, "%pM", addr);
 +}
 +
 +/* returns 1 if they are the same originator */
 +
 +int compare_orig(void *data1, void *data2)
 +{
 +	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 +}
 +
 +/* hashfunction to choose an entry in a hash table of given size */
 +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
 +int choose_orig(void *data, int32_t size)
 +{
 +	unsigned char *key = data;
 +	uint32_t hash = 0;
 +	size_t i;
 +
 +	for (i = 0; i < 6; i++) {
 +		hash += key[i];
 +		hash += (hash << 10);
 +		hash ^= (hash >> 6);
 +	}
 +
 +	hash += (hash << 3);
 +	hash ^= (hash >> 11);
 +	hash += (hash << 15);
 +
 +	return hash % size;
 +}
 +
 +int is_my_mac(uint8_t *addr)
 +{
 +	struct batman_if *batman_if;
 +
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
 +		if (batman_if->if_status != IF_ACTIVE)
 +			continue;
 +
 +		if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
 +			rcu_read_unlock();
 +			return 1;
 +		}
 +	}
 +	rcu_read_unlock();
 +	return 0;
 +
 +}
 +
 +int is_bcast(uint8_t *addr)
 +{
 +	return (addr[0] == (uint8_t)0xff) && (addr[1] == (uint8_t)0xff);
 +}
 +
 +int is_mcast(uint8_t *addr)
 +{
 +	return *addr & 0x01;
 +}
 +
 +module_init(batman_init);
 +module_exit(batman_exit);
 +
 +MODULE_LICENSE("GPL");
 +
 +MODULE_AUTHOR(DRIVER_AUTHOR);
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
 +#ifdef REVISION_VERSION
 +MODULE_VERSION(SOURCE_VERSION "-" REVISION_VERSION);
 +#else
 +MODULE_VERSION(SOURCE_VERSION);
 +#endif
diff --combined drivers/staging/batman-adv/main.h
index 8513261,ca97589..ca97589
--- a/drivers/staging/batman-adv/main.h
+++ b/drivers/staging/batman-adv/main.h
@@@ -30,7 -30,7 +30,7 @@@
  #define DRIVER_DESC   "B.A.T.M.A.N. advanced"
  #define DRIVER_DEVICE "batman-adv"
  
- #define SOURCE_VERSION "maint"
+ #define SOURCE_VERSION "next"
  
  
  /* B.A.T.M.A.N. parameters */
@@@ -76,9 -76,9 +76,9 @@@
  #define EXPECTED_SEQNO_RANGE	65536
  /* don't reset again within 30 seconds */
  
- #define MODULE_INACTIVE 0
- #define MODULE_ACTIVE 1
- #define MODULE_DEACTIVATING 2
+ #define MESH_INACTIVE 0
+ #define MESH_ACTIVE 1
+ #define MESH_DEACTIVATING 2
  
  #define BCAST_QUEUE_LEN		256
  #define BATMAN_QUEUE_LEN	256
@@@ -128,26 -128,12 +128,12 @@@
  #endif
  
  extern struct list_head if_list;
- extern struct hlist_head forw_bat_list;
- extern struct hlist_head forw_bcast_list;
- extern struct hashtable_t *orig_hash;
- 
- extern spinlock_t orig_hash_lock;
- extern spinlock_t forw_bat_list_lock;
- extern spinlock_t forw_bcast_list_lock;
- 
- extern atomic_t bcast_queue_left;
- extern atomic_t batman_queue_left;
- extern int16_t num_hna;
- 
- extern struct net_device *soft_device;
  
  extern unsigned char broadcast_addr[];
- extern atomic_t module_state;
  extern struct workqueue_struct *bat_event_workqueue;
  
- void activate_module(void);
- void deactivate_module(void);
+ int mesh_init(struct net_device *soft_iface);
+ void mesh_free(struct net_device *soft_iface);
  void inc_module_count(void);
  void dec_module_count(void);
  int addr_to_string(char *buff, uint8_t *addr);
@@@ -158,7 -144,7 +144,7 @@@ int is_bcast(uint8_t *addr)
  int is_mcast(uint8_t *addr);
  
  #ifdef CONFIG_BATMAN_ADV_DEBUG
- extern int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
+ int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
  
  #define bat_dbg(type, bat_priv, fmt, arg...)			\
  	do {							\
diff --combined drivers/staging/batman-adv/originator.c
index de5a8c1,0000000..f25d7fd
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@@ -1,511 -1,0 +1,539 @@@
 +/*
 + * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +/* increase the reference counter for this originator */
 +
 +#include "main.h"
 +#include "originator.h"
 +#include "hash.h"
 +#include "translation-table.h"
 +#include "routing.h"
 +#include "hard-interface.h"
++#include "unicast.h"
 +
- static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig);
++static void purge_orig(struct work_struct *work);
 +
- static void start_purge_timer(void)
++static void start_purge_timer(struct bat_priv *bat_priv)
 +{
- 	queue_delayed_work(bat_event_workqueue, &purge_orig_wq, 1 * HZ);
++	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
++	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
 +}
 +
- int originator_init(void)
++int originator_init(struct bat_priv *bat_priv)
 +{
 +	unsigned long flags;
- 	if (orig_hash)
++	if (bat_priv->orig_hash)
 +		return 1;
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
- 	orig_hash = hash_new(128, compare_orig, choose_orig);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
 +
- 	if (!orig_hash)
++	if (!bat_priv->orig_hash)
 +		goto err;
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
- 	start_purge_timer();
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
++	start_purge_timer(bat_priv);
 +	return 1;
 +
 +err:
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +	return 0;
 +}
 +
 +struct neigh_node *
 +create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
 +		uint8_t *neigh, struct batman_if *if_incoming)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +	struct neigh_node *neigh_node;
 +
 +	bat_dbg(DBG_BATMAN, bat_priv,
 +		"Creating new last-hop neighbor of originator\n");
 +
 +	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
 +	if (!neigh_node)
 +		return NULL;
 +
 +	INIT_LIST_HEAD(&neigh_node->list);
 +
 +	memcpy(neigh_node->addr, neigh, ETH_ALEN);
 +	neigh_node->orig_node = orig_neigh_node;
 +	neigh_node->if_incoming = if_incoming;
 +
 +	list_add_tail(&neigh_node->list, &orig_node->neigh_list);
 +	return neigh_node;
 +}
 +
- static void free_orig_node(void *data)
++static void free_orig_node(void *data, void *arg)
 +{
 +	struct list_head *list_pos, *list_pos_tmp;
 +	struct neigh_node *neigh_node;
 +	struct orig_node *orig_node = (struct orig_node *)data;
++	struct bat_priv *bat_priv = (struct bat_priv *)arg;
 +
 +	/* for all neighbors towards this originator ... */
 +	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
 +		neigh_node = list_entry(list_pos, struct neigh_node, list);
 +
 +		list_del(list_pos);
 +		kfree(neigh_node);
 +	}
 +
- 	hna_global_del_orig(orig_node, "originator timed out");
++	frag_list_free(&orig_node->frag_list);
++	hna_global_del_orig(bat_priv, orig_node, "originator timed out");
 +
 +	kfree(orig_node->bcast_own);
 +	kfree(orig_node->bcast_own_sum);
 +	kfree(orig_node);
 +}
 +
- void originator_free(void)
++void originator_free(struct bat_priv *bat_priv)
 +{
 +	unsigned long flags;
 +
- 	if (!orig_hash)
++	if (!bat_priv->orig_hash)
 +		return;
 +
- 	cancel_delayed_work_sync(&purge_orig_wq);
++	cancel_delayed_work_sync(&bat_priv->orig_work);
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
- 	hash_delete(orig_hash, free_orig_node);
- 	orig_hash = NULL;
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
++	bat_priv->orig_hash = NULL;
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +}
 +
 +/* this function finds or creates an originator entry for the given
 + * address if it does not exits */
- struct orig_node *get_orig_node(uint8_t *addr)
++struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct orig_node *orig_node;
 +	struct hashtable_t *swaphash;
 +	int size;
 +
- 	orig_node = ((struct orig_node *)hash_find(orig_hash, addr));
++	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
 +
- 	if (orig_node != NULL)
++	if (orig_node)
 +		return orig_node;
 +
 +	bat_dbg(DBG_BATMAN, bat_priv,
 +		"Creating new originator: %pM\n", addr);
 +
 +	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
 +	if (!orig_node)
 +		return NULL;
 +
 +	INIT_LIST_HEAD(&orig_node->neigh_list);
 +
 +	memcpy(orig_node->orig, addr, ETH_ALEN);
 +	orig_node->router = NULL;
 +	orig_node->hna_buff = NULL;
 +	orig_node->bcast_seqno_reset = jiffies - 1
 +					- msecs_to_jiffies(RESET_PROTECTION_MS);
 +	orig_node->batman_seqno_reset = jiffies - 1
 +					- msecs_to_jiffies(RESET_PROTECTION_MS);
 +
 +	size = bat_priv->num_ifaces * sizeof(TYPE_OF_WORD) * NUM_WORDS;
 +
 +	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
 +	if (!orig_node->bcast_own)
 +		goto free_orig_node;
 +
 +	size = bat_priv->num_ifaces * sizeof(uint8_t);
 +	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
++
++	INIT_LIST_HEAD(&orig_node->frag_list);
++	orig_node->last_frag_packet = 0;
++
 +	if (!orig_node->bcast_own_sum)
 +		goto free_bcast_own;
 +
- 	if (hash_add(orig_hash, orig_node) < 0)
++	if (hash_add(bat_priv->orig_hash, orig_node) < 0)
 +		goto free_bcast_own_sum;
 +
- 	if (orig_hash->elements * 4 > orig_hash->size) {
- 		swaphash = hash_resize(orig_hash, orig_hash->size * 2);
++	if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
++		swaphash = hash_resize(bat_priv->orig_hash,
++				       bat_priv->orig_hash->size * 2);
 +
- 		if (swaphash == NULL)
- 			bat_err(soft_device,
++		if (!swaphash)
++			bat_dbg(DBG_BATMAN, bat_priv,
 +				"Couldn't resize orig hash table\n");
 +		else
- 			orig_hash = swaphash;
++			bat_priv->orig_hash = swaphash;
 +	}
 +
 +	return orig_node;
 +free_bcast_own_sum:
 +	kfree(orig_node->bcast_own_sum);
 +free_bcast_own:
 +	kfree(orig_node->bcast_own);
 +free_orig_node:
 +	kfree(orig_node);
 +	return NULL;
 +}
 +
- static bool purge_orig_neighbors(struct orig_node *orig_node,
++static bool purge_orig_neighbors(struct bat_priv *bat_priv,
++				 struct orig_node *orig_node,
 +				 struct neigh_node **best_neigh_node)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct list_head *list_pos, *list_pos_tmp;
 +	struct neigh_node *neigh_node;
 +	bool neigh_purged = false;
 +
 +	*best_neigh_node = NULL;
 +
 +	/* for all neighbors towards this originator ... */
 +	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
 +		neigh_node = list_entry(list_pos, struct neigh_node, list);
 +
 +		if ((time_after(jiffies,
 +			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
- 		    (neigh_node->if_incoming->if_status ==
- 						IF_TO_BE_REMOVED)) {
++		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
++		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
 +
 +			if (neigh_node->if_incoming->if_status ==
 +							IF_TO_BE_REMOVED)
 +				bat_dbg(DBG_BATMAN, bat_priv,
 +					"neighbor purge: originator %pM, "
 +					"neighbor: %pM, iface: %s\n",
 +					orig_node->orig, neigh_node->addr,
- 					neigh_node->if_incoming->dev);
++					neigh_node->if_incoming->net_dev->name);
 +			else
 +				bat_dbg(DBG_BATMAN, bat_priv,
 +					"neighbor timeout: originator %pM, "
 +					"neighbor: %pM, last_valid: %lu\n",
 +					orig_node->orig, neigh_node->addr,
 +					(neigh_node->last_valid / HZ));
 +
 +			neigh_purged = true;
 +			list_del(list_pos);
 +			kfree(neigh_node);
 +		} else {
 +			if ((*best_neigh_node == NULL) ||
 +			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
 +				*best_neigh_node = neigh_node;
 +		}
 +	}
 +	return neigh_purged;
 +}
 +
- static bool purge_orig_node(struct orig_node *orig_node)
++static bool purge_orig_node(struct bat_priv *bat_priv,
++			    struct orig_node *orig_node)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct neigh_node *best_neigh_node;
 +
 +	if (time_after(jiffies,
 +		orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
 +
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Originator timeout: originator %pM, last_valid %lu\n",
 +			orig_node->orig, (orig_node->last_valid / HZ));
 +		return true;
 +	} else {
- 		if (purge_orig_neighbors(orig_node, &best_neigh_node)) {
- 			update_routes(orig_node, best_neigh_node,
++		if (purge_orig_neighbors(bat_priv, orig_node,
++							&best_neigh_node)) {
++			update_routes(bat_priv, orig_node,
++				      best_neigh_node,
 +				      orig_node->hna_buff,
 +				      orig_node->hna_buff_len);
 +			/* update bonding candidates, we could have lost
 +			 * some candidates. */
 +			update_bonding_candidates(bat_priv, orig_node);
 +		}
 +	}
 +
 +	return false;
 +}
 +
- void purge_orig(struct work_struct *work)
++static void _purge_orig(struct bat_priv *bat_priv)
 +{
 +	HASHIT(hashit);
 +	struct orig_node *orig_node;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +
 +	/* for all origins... */
- 	while (hash_iterate(orig_hash, &hashit)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +		orig_node = hashit.bucket->data;
- 		if (purge_orig_node(orig_node)) {
- 			hash_remove_bucket(orig_hash, &hashit);
- 			free_orig_node(orig_node);
++
++		if (purge_orig_node(bat_priv, orig_node)) {
++			hash_remove_bucket(bat_priv->orig_hash, &hashit);
++			free_orig_node(orig_node, bat_priv);
 +		}
++
++		if (time_after(jiffies, (orig_node->last_frag_packet +
++					msecs_to_jiffies(FRAG_TIMEOUT))))
++			frag_list_free(&orig_node->frag_list);
 +	}
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
- 	/* if work == NULL we were not called by the timer
- 	 * and thus do not need to re-arm the timer */
- 	if (work)
- 		start_purge_timer();
++}
++
++static void purge_orig(struct work_struct *work)
++{
++	struct delayed_work *delayed_work =
++		container_of(work, struct delayed_work, work);
++	struct bat_priv *bat_priv =
++		container_of(delayed_work, struct bat_priv, orig_work);
++
++	_purge_orig(bat_priv);
++	start_purge_timer(bat_priv);
++}
++
++void purge_orig_ref(struct bat_priv *bat_priv)
++{
++	_purge_orig(bat_priv);
 +}
 +
 +int orig_seq_print_text(struct seq_file *seq, void *offset)
 +{
 +	HASHIT(hashit);
 +	struct net_device *net_dev = (struct net_device *)seq->private;
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	struct orig_node *orig_node;
 +	struct neigh_node *neigh_node;
 +	int batman_count = 0;
 +	int last_seen_secs;
 +	int last_seen_msecs;
 +	unsigned long flags;
 +	char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
 +
 +	if ((!bat_priv->primary_if) ||
 +	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
 +		if (!bat_priv->primary_if)
 +			return seq_printf(seq, "BATMAN mesh %s disabled - "
 +				     "please specify interfaces to enable it\n",
 +				     net_dev->name);
 +
 +		return seq_printf(seq, "BATMAN mesh %s "
 +				  "disabled - primary interface not active\n",
 +				  net_dev->name);
 +	}
 +
 +	rcu_read_lock();
 +	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s (%s)]\n",
 +		   SOURCE_VERSION, REVISION_VERSION_STR,
- 		   bat_priv->primary_if->dev, bat_priv->primary_if->addr_str,
- 		   net_dev->name);
++		   bat_priv->primary_if->net_dev->name,
++		   bat_priv->primary_if->addr_str, net_dev->name);
 +	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
 +		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
 +		   "outgoingIF", "Potential nexthops");
 +	rcu_read_unlock();
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +
- 	while (hash_iterate(orig_hash, &hashit)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +
 +		orig_node = hashit.bucket->data;
 +
 +		if (!orig_node->router)
 +			continue;
 +
 +		if (orig_node->router->tq_avg == 0)
 +			continue;
 +
 +		addr_to_string(orig_str, orig_node->orig);
 +		addr_to_string(router_str, orig_node->router->addr);
 +		last_seen_secs = jiffies_to_msecs(jiffies -
 +						orig_node->last_valid) / 1000;
 +		last_seen_msecs = jiffies_to_msecs(jiffies -
 +						orig_node->last_valid) % 1000;
 +
 +		seq_printf(seq, "%-17s %4i.%03is   (%3i) %17s [%10s]:",
 +			   orig_str, last_seen_secs, last_seen_msecs,
 +			   orig_node->router->tq_avg, router_str,
- 			   orig_node->router->if_incoming->dev);
++			   orig_node->router->if_incoming->net_dev->name);
 +
 +		list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
 +			addr_to_string(orig_str, neigh_node->addr);
 +			seq_printf(seq, " %17s (%3i)", orig_str,
 +					   neigh_node->tq_avg);
 +		}
 +
 +		seq_printf(seq, "\n");
 +		batman_count++;
 +	}
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	if ((batman_count == 0))
 +		seq_printf(seq, "No batman nodes in range ...\n");
 +
 +	return 0;
 +}
 +
 +static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
 +{
 +	void *data_ptr;
 +
 +	data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS,
 +			   GFP_ATOMIC);
 +	if (!data_ptr) {
 +		pr_err("Can't resize orig: out of memory\n");
 +		return -1;
 +	}
 +
 +	memcpy(data_ptr, orig_node->bcast_own,
 +	       (max_if_num - 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS);
 +	kfree(orig_node->bcast_own);
 +	orig_node->bcast_own = data_ptr;
 +
 +	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
 +	if (!data_ptr) {
 +		pr_err("Can't resize orig: out of memory\n");
 +		return -1;
 +	}
 +
 +	memcpy(data_ptr, orig_node->bcast_own_sum,
 +	       (max_if_num - 1) * sizeof(uint8_t));
 +	kfree(orig_node->bcast_own_sum);
 +	orig_node->bcast_own_sum = data_ptr;
 +
 +	return 0;
 +}
 +
 +int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
 +{
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +	struct orig_node *orig_node;
 +	unsigned long flags;
 +	HASHIT(hashit);
 +
 +	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 +	 * if_num */
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +
- 	while (hash_iterate(orig_hash, &hashit)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +		orig_node = hashit.bucket->data;
 +
 +		if (orig_node_add_if(orig_node, max_if_num) == -1)
 +			goto err;
 +	}
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +	return 0;
 +
 +err:
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +	return -ENOMEM;
 +}
 +
 +static int orig_node_del_if(struct orig_node *orig_node,
 +		     int max_if_num, int del_if_num)
 +{
 +	void *data_ptr = NULL;
 +	int chunk_size;
 +
 +	/* last interface was removed */
 +	if (max_if_num == 0)
 +		goto free_bcast_own;
 +
 +	chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS;
 +	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
 +	if (!data_ptr) {
 +		pr_err("Can't resize orig: out of memory\n");
 +		return -1;
 +	}
 +
 +	/* copy first part */
 +	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
 +
 +	/* copy second part */
 +	memcpy(data_ptr,
 +	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
 +	       (max_if_num - del_if_num) * chunk_size);
 +
 +free_bcast_own:
 +	kfree(orig_node->bcast_own);
 +	orig_node->bcast_own = data_ptr;
 +
 +	if (max_if_num == 0)
 +		goto free_own_sum;
 +
 +	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
 +	if (!data_ptr) {
 +		pr_err("Can't resize orig: out of memory\n");
 +		return -1;
 +	}
 +
 +	memcpy(data_ptr, orig_node->bcast_own_sum,
 +	       del_if_num * sizeof(uint8_t));
 +
 +	memcpy(data_ptr,
 +	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
 +	       (max_if_num - del_if_num) * sizeof(uint8_t));
 +
 +free_own_sum:
 +	kfree(orig_node->bcast_own_sum);
 +	orig_node->bcast_own_sum = data_ptr;
 +
 +	return 0;
 +}
 +
 +int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
 +{
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +	struct batman_if *batman_if_tmp;
 +	struct orig_node *orig_node;
 +	unsigned long flags;
 +	HASHIT(hashit);
 +	int ret;
 +
 +	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 +	 * if_num */
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +
- 	while (hash_iterate(orig_hash, &hashit)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +		orig_node = hashit.bucket->data;
 +
 +		ret = orig_node_del_if(orig_node, max_if_num,
 +				       batman_if->if_num);
 +
 +		if (ret == -1)
 +			goto err;
 +	}
 +
 +	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
 +		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
 +			continue;
 +
 +		if (batman_if == batman_if_tmp)
 +			continue;
 +
++		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
++			continue;
++
 +		if (batman_if_tmp->if_num > batman_if->if_num)
 +			batman_if_tmp->if_num--;
 +	}
 +	rcu_read_unlock();
 +
 +	batman_if->if_num = -1;
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +	return 0;
 +
 +err:
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +	return -ENOMEM;
 +}
diff --combined drivers/staging/batman-adv/originator.h
index e88411d,a97c400..a97c400
--- a/drivers/staging/batman-adv/originator.h
+++ b/drivers/staging/batman-adv/originator.h
@@@ -22,10 -22,10 +22,10 @@@
  #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
  #define _NET_BATMAN_ADV_ORIGINATOR_H_
  
- int originator_init(void);
- void originator_free(void);
- void purge_orig(struct work_struct *work);
- struct orig_node *get_orig_node(uint8_t *addr);
+ int originator_init(struct bat_priv *bat_priv);
+ void originator_free(struct bat_priv *bat_priv);
+ void purge_orig_ref(struct bat_priv *bat_priv);
+ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
  struct neigh_node *
  create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
  		uint8_t *neigh, struct batman_if *if_incoming);
diff --combined drivers/staging/batman-adv/packet.h
index abb5e46,44de1bf..44de1bf
--- a/drivers/staging/batman-adv/packet.h
+++ b/drivers/staging/batman-adv/packet.h
@@@ -24,14 -24,15 +24,15 @@@
  
  #define ETH_P_BATMAN  0x4305	/* unofficial/not registered Ethertype */
  
- #define BAT_PACKET    0x01
- #define BAT_ICMP      0x02
- #define BAT_UNICAST   0x03
- #define BAT_BCAST     0x04
- #define BAT_VIS       0x05
+ #define BAT_PACKET       0x01
+ #define BAT_ICMP         0x02
+ #define BAT_UNICAST      0x03
+ #define BAT_BCAST        0x04
+ #define BAT_VIS          0x05
+ #define BAT_UNICAST_FRAG 0x06
  
  /* this file is included by batctl which needs these defines */
- #define COMPAT_VERSION 11
+ #define COMPAT_VERSION 13
  #define DIRECTLINK 0x40
  #define VIS_SERVER 0x20
  #define PRIMARIES_FIRST_HOP 0x10
@@@ -47,6 -48,9 +48,9 @@@
  #define VIS_TYPE_SERVER_SYNC		0
  #define VIS_TYPE_CLIENT_UPDATE		1
  
+ /* fragmentation defines */
+ #define UNI_FRAG_HEAD 0x01
+ 
  struct batman_packet {
  	uint8_t  packet_type;
  	uint8_t  version;  /* batman version field */
@@@ -96,6 -100,16 +100,16 @@@ struct unicast_packet 
  	uint8_t  ttl;
  } __attribute__((packed));
  
+ struct unicast_frag_packet {
+ 	uint8_t  packet_type;
+ 	uint8_t  version;  /* batman version field */
+ 	uint8_t  dest[6];
+ 	uint8_t  ttl;
+ 	uint8_t  flags;
+ 	uint8_t  orig[6];
+ 	uint16_t seqno;
+ } __attribute__((packed));
+ 
  struct bcast_packet {
  	uint8_t  packet_type;
  	uint8_t  version;  /* batman version field */
diff --combined drivers/staging/batman-adv/routing.c
index 032195e,0000000..e12fd99
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@@ -1,1317 -1,0 +1,1387 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "routing.h"
 +#include "send.h"
 +#include "hash.h"
 +#include "soft-interface.h"
 +#include "hard-interface.h"
 +#include "icmp_socket.h"
 +#include "translation-table.h"
 +#include "originator.h"
 +#include "types.h"
 +#include "ring_buffer.h"
 +#include "vis.h"
 +#include "aggregation.h"
- 
- static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
++#include "unicast.h"
 +
 +void slide_own_bcast_window(struct batman_if *batman_if)
 +{
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +	HASHIT(hashit);
 +	struct orig_node *orig_node;
 +	TYPE_OF_WORD *word;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +
- 	while (hash_iterate(orig_hash, &hashit)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +		orig_node = hashit.bucket->data;
 +		word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]);
 +
- 		bit_get_packet(word, 1, 0);
++		bit_get_packet(bat_priv, word, 1, 0);
 +		orig_node->bcast_own_sum[batman_if->if_num] =
 +			bit_packet_count(word);
 +	}
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +}
 +
- static void update_HNA(struct orig_node *orig_node,
++static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
 +		       unsigned char *hna_buff, int hna_buff_len)
 +{
 +	if ((hna_buff_len != orig_node->hna_buff_len) ||
 +	    ((hna_buff_len > 0) &&
 +	     (orig_node->hna_buff_len > 0) &&
 +	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
 +
 +		if (orig_node->hna_buff_len > 0)
- 			hna_global_del_orig(orig_node,
++			hna_global_del_orig(bat_priv, orig_node,
 +					    "originator changed hna");
 +
 +		if ((hna_buff_len > 0) && (hna_buff != NULL))
- 			hna_global_add_orig(orig_node, hna_buff, hna_buff_len);
++			hna_global_add_orig(bat_priv, orig_node,
++					    hna_buff, hna_buff_len);
 +	}
 +}
 +
- static void update_route(struct orig_node *orig_node,
++static void update_route(struct bat_priv *bat_priv,
++			 struct orig_node *orig_node,
 +			 struct neigh_node *neigh_node,
 +			 unsigned char *hna_buff, int hna_buff_len)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
- 
 +	/* route deleted */
 +	if ((orig_node->router != NULL) && (neigh_node == NULL)) {
 +
 +		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
 +			orig_node->orig);
- 		hna_global_del_orig(orig_node, "originator timed out");
++		hna_global_del_orig(bat_priv, orig_node,
++				    "originator timed out");
 +
 +		/* route added */
 +	} else if ((orig_node->router == NULL) && (neigh_node != NULL)) {
 +
 +		bat_dbg(DBG_ROUTES, bat_priv,
 +			"Adding route towards: %pM (via %pM)\n",
 +			orig_node->orig, neigh_node->addr);
- 		hna_global_add_orig(orig_node, hna_buff, hna_buff_len);
++		hna_global_add_orig(bat_priv, orig_node,
++				    hna_buff, hna_buff_len);
 +
 +		/* route changed */
 +	} else {
 +		bat_dbg(DBG_ROUTES, bat_priv,
 +			"Changing route towards: %pM "
 +			"(now via %pM - was via %pM)\n",
 +			orig_node->orig, neigh_node->addr,
 +			orig_node->router->addr);
 +	}
 +
 +	orig_node->router = neigh_node;
 +}
 +
 +
- void update_routes(struct orig_node *orig_node,
- 			  struct neigh_node *neigh_node,
- 			  unsigned char *hna_buff, int hna_buff_len)
++void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
++		   struct neigh_node *neigh_node, unsigned char *hna_buff,
++		   int hna_buff_len)
 +{
 +
 +	if (orig_node == NULL)
 +		return;
 +
 +	if (orig_node->router != neigh_node)
- 		update_route(orig_node, neigh_node, hna_buff, hna_buff_len);
++		update_route(bat_priv, orig_node, neigh_node,
++			     hna_buff, hna_buff_len);
 +	/* may be just HNA changed */
 +	else
- 		update_HNA(orig_node, hna_buff, hna_buff_len);
++		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
 +}
 +
 +static int is_bidirectional_neigh(struct orig_node *orig_node,
 +				struct orig_node *orig_neigh_node,
 +				struct batman_packet *batman_packet,
 +				struct batman_if *if_incoming)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 +	unsigned char total_count;
 +
 +	if (orig_node == orig_neigh_node) {
 +		list_for_each_entry(tmp_neigh_node,
 +				    &orig_node->neigh_list,
 +				    list) {
 +
 +			if (compare_orig(tmp_neigh_node->addr,
 +					 orig_neigh_node->orig) &&
 +			    (tmp_neigh_node->if_incoming == if_incoming))
 +				neigh_node = tmp_neigh_node;
 +		}
 +
 +		if (!neigh_node)
 +			neigh_node = create_neighbor(orig_node,
 +						     orig_neigh_node,
 +						     orig_neigh_node->orig,
 +						     if_incoming);
 +		/* create_neighbor failed, return 0 */
 +		if (!neigh_node)
 +			return 0;
 +
 +		neigh_node->last_valid = jiffies;
 +	} else {
 +		/* find packet count of corresponding one hop neighbor */
 +		list_for_each_entry(tmp_neigh_node,
 +				    &orig_neigh_node->neigh_list, list) {
 +
 +			if (compare_orig(tmp_neigh_node->addr,
 +					 orig_neigh_node->orig) &&
 +			    (tmp_neigh_node->if_incoming == if_incoming))
 +				neigh_node = tmp_neigh_node;
 +		}
 +
 +		if (!neigh_node)
 +			neigh_node = create_neighbor(orig_neigh_node,
 +						     orig_neigh_node,
 +						     orig_neigh_node->orig,
 +						     if_incoming);
 +		/* create_neighbor failed, return 0 */
 +		if (!neigh_node)
 +			return 0;
 +	}
 +
 +	orig_node->last_valid = jiffies;
 +
 +	/* pay attention to not get a value bigger than 100 % */
 +	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
 +		       neigh_node->real_packet_count ?
 +		       neigh_node->real_packet_count :
 +		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
 +
 +	/* if we have too few packets (too less data) we set tq_own to zero */
 +	/* if we receive too few packets it is not considered bidirectional */
 +	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
 +	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
 +		orig_neigh_node->tq_own = 0;
 +	else
 +		/* neigh_node->real_packet_count is never zero as we
 +		 * only purge old information when getting new
 +		 * information */
 +		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
 +			neigh_node->real_packet_count;
 +
 +	/*
 +	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
 +	 * affect the nearly-symmetric links only a little, but
 +	 * punishes asymmetric links more.  This will give a value
 +	 * between 0 and TQ_MAX_VALUE
 +	 */
 +	orig_neigh_node->tq_asym_penalty =
 +		TQ_MAX_VALUE -
 +		(TQ_MAX_VALUE *
 +		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
 +		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
 +		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
 +		(TQ_LOCAL_WINDOW_SIZE *
 +		 TQ_LOCAL_WINDOW_SIZE *
 +		 TQ_LOCAL_WINDOW_SIZE);
 +
 +	batman_packet->tq = ((batman_packet->tq *
 +			      orig_neigh_node->tq_own *
 +			      orig_neigh_node->tq_asym_penalty) /
 +			     (TQ_MAX_VALUE * TQ_MAX_VALUE));
 +
 +	bat_dbg(DBG_BATMAN, bat_priv,
 +		"bidirectional: "
 +		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
 +		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
 +		"total tq: %3i\n",
 +		orig_node->orig, orig_neigh_node->orig, total_count,
 +		neigh_node->real_packet_count, orig_neigh_node->tq_own,
 +		orig_neigh_node->tq_asym_penalty, batman_packet->tq);
 +
 +	/* if link has the minimum required transmission quality
 +	 * consider it bidirectional */
 +	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
 +		return 1;
 +
 +	return 0;
 +}
 +
- static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
++static void update_orig(struct bat_priv *bat_priv,
++			struct orig_node *orig_node,
++			struct ethhdr *ethhdr,
 +			struct batman_packet *batman_packet,
 +			struct batman_if *if_incoming,
 +			unsigned char *hna_buff, int hna_buff_len,
 +			char is_duplicate)
 +{
- 	/* FIXME: get bat_priv */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 +	int tmp_hna_buff_len;
 +
 +	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
 +		"Searching and updating originator entry of received packet\n");
 +
 +	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
 +		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
 +		    (tmp_neigh_node->if_incoming == if_incoming)) {
 +			neigh_node = tmp_neigh_node;
 +			continue;
 +		}
 +
 +		if (is_duplicate)
 +			continue;
 +
 +		ring_buffer_set(tmp_neigh_node->tq_recv,
 +				&tmp_neigh_node->tq_index, 0);
 +		tmp_neigh_node->tq_avg =
 +			ring_buffer_avg(tmp_neigh_node->tq_recv);
 +	}
 +
 +	if (!neigh_node) {
 +		struct orig_node *orig_tmp;
 +
- 		orig_tmp = get_orig_node(ethhdr->h_source);
++		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
 +		if (!orig_tmp)
 +			return;
 +
- 		neigh_node = create_neighbor(orig_node,
- 					     orig_tmp,
++		neigh_node = create_neighbor(orig_node, orig_tmp,
 +					     ethhdr->h_source, if_incoming);
 +		if (!neigh_node)
 +			return;
 +	} else
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Updating existing last-hop neighbor of originator\n");
 +
 +	orig_node->flags = batman_packet->flags;
 +	neigh_node->last_valid = jiffies;
 +
 +	ring_buffer_set(neigh_node->tq_recv,
 +			&neigh_node->tq_index,
 +			batman_packet->tq);
 +	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
 +
 +	if (!is_duplicate) {
 +		orig_node->last_ttl = batman_packet->ttl;
 +		neigh_node->last_ttl = batman_packet->ttl;
 +	}
 +
 +	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
 +			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);
 +
 +	/* if this neighbor already is our next hop there is nothing
 +	 * to change */
 +	if (orig_node->router == neigh_node)
 +		goto update_hna;
 +
 +	/* if this neighbor does not offer a better TQ we won't consider it */
 +	if ((orig_node->router) &&
 +	    (orig_node->router->tq_avg > neigh_node->tq_avg))
 +		goto update_hna;
 +
 +	/* if the TQ is the same and the link not more symetric we
 +	 * won't consider it either */
 +	if ((orig_node->router) &&
 +	     ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
 +	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
 +	      >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
 +		goto update_hna;
 +
- 	update_routes(orig_node, neigh_node, hna_buff, tmp_hna_buff_len);
++	update_routes(bat_priv, orig_node, neigh_node,
++		      hna_buff, tmp_hna_buff_len);
 +	return;
 +
 +update_hna:
- 	update_routes(orig_node, orig_node->router, hna_buff, tmp_hna_buff_len);
++	update_routes(bat_priv, orig_node, orig_node->router,
++		      hna_buff, tmp_hna_buff_len);
 +}
 +
 +/* checks whether the host restarted and is in the protection time.
 + * returns:
 + *  0 if the packet is to be accepted
 + *  1 if the packet is to be ignored.
 + */
- static int window_protected(int32_t seq_num_diff,
- 				unsigned long *last_reset)
++static int window_protected(struct bat_priv *bat_priv,
++			    int32_t seq_num_diff,
++			    unsigned long *last_reset)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
- 
 +	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
 +		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
 +		if (time_after(jiffies, *last_reset +
 +			msecs_to_jiffies(RESET_PROTECTION_MS))) {
 +
 +			*last_reset = jiffies;
 +			bat_dbg(DBG_BATMAN, bat_priv,
 +				"old packet received, start protection\n");
 +
 +			return 0;
 +		} else
 +			return 1;
 +	}
 +	return 0;
 +}
 +
 +/* processes a batman packet for all interfaces, adjusts the sequence number and
 + * finds out whether it is a duplicate.
 + * returns:
 + *   1 the packet is a duplicate
 + *   0 the packet has not yet been received
 + *  -1 the packet is old and has been received while the seqno window
 + *     was protected. Caller should drop it.
 + */
 +static char count_real_packets(struct ethhdr *ethhdr,
 +			       struct batman_packet *batman_packet,
 +			       struct batman_if *if_incoming)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +	struct orig_node *orig_node;
 +	struct neigh_node *tmp_neigh_node;
 +	char is_duplicate = 0;
 +	int32_t seq_diff;
 +	int need_update = 0;
 +	int set_mark;
 +
- 	orig_node = get_orig_node(batman_packet->orig);
++	orig_node = get_orig_node(bat_priv, batman_packet->orig);
 +	if (orig_node == NULL)
 +		return 0;
 +
 +	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
 +
 +	/* signalize caller that the packet is to be dropped. */
- 	if (window_protected(seq_diff, &orig_node->batman_seqno_reset))
++	if (window_protected(bat_priv, seq_diff,
++			     &orig_node->batman_seqno_reset))
 +		return -1;
 +
 +	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
 +
 +		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
 +					       orig_node->last_real_seqno,
 +					       batman_packet->seqno);
 +
 +		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
 +		    (tmp_neigh_node->if_incoming == if_incoming))
 +			set_mark = 1;
 +		else
 +			set_mark = 0;
 +
 +		/* if the window moved, set the update flag. */
- 		need_update |= bit_get_packet(tmp_neigh_node->real_bits,
- 						seq_diff, set_mark);
++		need_update |= bit_get_packet(bat_priv,
++					      tmp_neigh_node->real_bits,
++					      seq_diff, set_mark);
 +
 +		tmp_neigh_node->real_packet_count =
 +			bit_packet_count(tmp_neigh_node->real_bits);
 +	}
 +
 +	if (need_update) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"updating last_seqno: old %d, new %d\n",
 +			orig_node->last_real_seqno, batman_packet->seqno);
 +		orig_node->last_real_seqno = batman_packet->seqno;
 +	}
 +
 +	return is_duplicate;
 +}
 +
 +/* copy primary address for bonding */
 +static void mark_bonding_address(struct bat_priv *bat_priv,
 +				 struct orig_node *orig_node,
 +				 struct orig_node *orig_neigh_node,
 +				 struct batman_packet *batman_packet)
 +
 +{
 +	if (batman_packet->flags & PRIMARIES_FIRST_HOP)
 +		memcpy(orig_neigh_node->primary_addr,
 +		       orig_node->orig, ETH_ALEN);
 +
 +	return;
 +}
 +
 +/* mark possible bond.candidates in the neighbor list */
 +void update_bonding_candidates(struct bat_priv *bat_priv,
 +			       struct orig_node *orig_node)
 +{
 +	int candidates;
 +	int interference_candidate;
 +	int best_tq;
 +	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
 +	struct neigh_node *first_candidate, *last_candidate;
 +
 +	/* update the candidates for this originator */
 +	if (!orig_node->router) {
 +		orig_node->bond.candidates = 0;
 +		return;
 +	}
 +
 +	best_tq = orig_node->router->tq_avg;
 +
 +	/* update bond.candidates */
 +
 +	candidates = 0;
 +
 +	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
 +	 * as "bonding partner" */
 +
 +	/* first, zero the list */
 +	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
 +		tmp_neigh_node->next_bond_candidate = NULL;
 +	}
 +
 +	first_candidate = NULL;
 +	last_candidate = NULL;
 +	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
 +
 +		/* only consider if it has the same primary address ...  */
 +		if (memcmp(orig_node->orig,
 +				tmp_neigh_node->orig_node->primary_addr,
 +				ETH_ALEN) != 0)
 +			continue;
 +
 +		/* ... and is good enough to be considered */
 +		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
 +			continue;
 +
 +		/* check if we have another candidate with the same
 +		 * mac address or interface. If we do, we won't
 +		 * select this candidate because of possible interference. */
 +
 +		interference_candidate = 0;
 +		list_for_each_entry(tmp_neigh_node2,
 +				&orig_node->neigh_list, list) {
 +
 +			if (tmp_neigh_node2 == tmp_neigh_node)
 +				continue;
 +
 +			/* we only care if the other candidate is even
 +			 * considered as candidate. */
 +			if (tmp_neigh_node2->next_bond_candidate == NULL)
 +				continue;
 +
 +
 +			if ((tmp_neigh_node->if_incoming ==
 +				tmp_neigh_node2->if_incoming)
 +				|| (memcmp(tmp_neigh_node->addr,
 +				tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
 +
 +				interference_candidate = 1;
 +				break;
 +			}
 +		}
 +		/* don't care further if it is an interference candidate */
 +		if (interference_candidate)
 +			continue;
 +
 +		if (first_candidate == NULL) {
 +			first_candidate = tmp_neigh_node;
 +			tmp_neigh_node->next_bond_candidate = first_candidate;
 +		} else
 +			tmp_neigh_node->next_bond_candidate = last_candidate;
 +
 +		last_candidate = tmp_neigh_node;
 +
 +		candidates++;
 +	}
 +
 +	if (candidates > 0) {
 +		first_candidate->next_bond_candidate = last_candidate;
 +		orig_node->bond.selected = first_candidate;
 +	}
 +
 +	orig_node->bond.candidates = candidates;
 +}
 +
 +void receive_bat_packet(struct ethhdr *ethhdr,
 +				struct batman_packet *batman_packet,
 +				unsigned char *hna_buff, int hna_buff_len,
 +				struct batman_if *if_incoming)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +	struct batman_if *batman_if;
 +	struct orig_node *orig_neigh_node, *orig_node;
 +	char has_directlink_flag;
 +	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
 +	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
 +	char is_duplicate;
 +	uint32_t if_incoming_seqno;
 +
 +	/* Silently drop when the batman packet is actually not a
 +	 * correct packet.
 +	 *
 +	 * This might happen if a packet is padded (e.g. Ethernet has a
 +	 * minimum frame length of 64 byte) and the aggregation interprets
 +	 * it as an additional length.
 +	 *
 +	 * TODO: A more sane solution would be to have a bit in the
 +	 * batman_packet to detect whether the packet is the last
 +	 * packet in an aggregation.  Here we expect that the padding
 +	 * is always zero (or not 0x01)
 +	 */
 +	if (batman_packet->packet_type != BAT_PACKET)
 +		return;
 +
 +	/* could be changed by schedule_own_packet() */
 +	if_incoming_seqno = atomic_read(&if_incoming->seqno);
 +
 +	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 +
 +	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
 +					    batman_packet->orig) ? 1 : 0);
 +
 +	bat_dbg(DBG_BATMAN, bat_priv,
 +		"Received BATMAN packet via NB: %pM, IF: %s [%s] "
 +		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
 +		"TTL %d, V %d, IDF %d)\n",
- 		ethhdr->h_source, if_incoming->dev, if_incoming->addr_str,
- 		batman_packet->orig, batman_packet->prev_sender,
- 		batman_packet->seqno, batman_packet->tq, batman_packet->ttl,
- 		batman_packet->version, has_directlink_flag);
++		ethhdr->h_source, if_incoming->net_dev->name,
++		if_incoming->addr_str, batman_packet->orig,
++		batman_packet->prev_sender, batman_packet->seqno,
++		batman_packet->tq, batman_packet->ttl, batman_packet->version,
++		has_directlink_flag);
 +
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
 +		if (batman_if->if_status != IF_ACTIVE)
 +			continue;
 +
++		if (batman_if->soft_iface != if_incoming->soft_iface)
++			continue;
++
 +		if (compare_orig(ethhdr->h_source,
 +				 batman_if->net_dev->dev_addr))
 +			is_my_addr = 1;
 +
 +		if (compare_orig(batman_packet->orig,
 +				 batman_if->net_dev->dev_addr))
 +			is_my_orig = 1;
 +
 +		if (compare_orig(batman_packet->prev_sender,
 +				 batman_if->net_dev->dev_addr))
 +			is_my_oldorig = 1;
 +
 +		if (compare_orig(ethhdr->h_source, broadcast_addr))
 +			is_broadcast = 1;
 +	}
 +
 +	if (batman_packet->version != COMPAT_VERSION) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: incompatible batman version (%i)\n",
 +			batman_packet->version);
 +		return;
 +	}
 +
 +	if (is_my_addr) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: received my own broadcast (sender: %pM"
 +			")\n",
 +			ethhdr->h_source);
 +		return;
 +	}
 +
 +	if (is_broadcast) {
 +		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
 +		"ignoring all packets with broadcast source addr (sender: %pM"
 +		")\n", ethhdr->h_source);
 +		return;
 +	}
 +
 +	if (is_my_orig) {
 +		TYPE_OF_WORD *word;
 +		int offset;
 +
- 		orig_neigh_node = get_orig_node(ethhdr->h_source);
++		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
 +
 +		if (!orig_neigh_node)
 +			return;
 +
 +		/* neighbor has to indicate direct link and it has to
 +		 * come via the corresponding interface */
 +		/* if received seqno equals last send seqno save new
 +		 * seqno for bidirectional check */
 +		if (has_directlink_flag &&
 +		    compare_orig(if_incoming->net_dev->dev_addr,
 +				 batman_packet->orig) &&
 +		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
 +			offset = if_incoming->if_num * NUM_WORDS;
 +			word = &(orig_neigh_node->bcast_own[offset]);
 +			bit_mark(word, 0);
 +			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
 +				bit_packet_count(word);
 +		}
 +
 +		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
 +			"originator packet from myself (via neighbor)\n");
 +		return;
 +	}
 +
 +	if (is_my_oldorig) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: ignoring all rebroadcast echos (sender: "
 +			"%pM)\n", ethhdr->h_source);
 +		return;
 +	}
 +
- 	orig_node = get_orig_node(batman_packet->orig);
++	orig_node = get_orig_node(bat_priv, batman_packet->orig);
 +	if (orig_node == NULL)
 +		return;
 +
 +	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
 +
 +	if (is_duplicate == -1) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: packet within seqno protection time "
 +			"(sender: %pM)\n", ethhdr->h_source);
 +		return;
 +	}
 +
 +	if (batman_packet->tq == 0) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: originator packet with tq equal 0\n");
 +		return;
 +	}
 +
 +	/* avoid temporary routing loops */
 +	if ((orig_node->router) &&
 +	    (orig_node->router->orig_node->router) &&
 +	    (compare_orig(orig_node->router->addr,
 +			  batman_packet->prev_sender)) &&
 +	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
 +	    (compare_orig(orig_node->router->addr,
 +			  orig_node->router->orig_node->router->addr))) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: ignoring all rebroadcast packets that "
 +			"may make me loop (sender: %pM)\n", ethhdr->h_source);
 +		return;
 +	}
 +
 +	/* if sender is a direct neighbor the sender mac equals
 +	 * originator mac */
 +	orig_neigh_node = (is_single_hop_neigh ?
- 			   orig_node : get_orig_node(ethhdr->h_source));
++			   orig_node :
++			   get_orig_node(bat_priv, ethhdr->h_source));
 +	if (orig_neigh_node == NULL)
 +		return;
 +
 +	/* drop packet if sender is not a direct neighbor and if we
 +	 * don't route towards it */
 +	if (!is_single_hop_neigh &&
 +	    (orig_neigh_node->router == NULL)) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: OGM via unknown neighbor!\n");
 +		return;
 +	}
 +
 +	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
 +						batman_packet, if_incoming);
 +
 +	/* update ranking if it is not a duplicate or has the same
 +	 * seqno and similar ttl as the non-duplicate */
 +	if (is_bidirectional &&
 +	    (!is_duplicate ||
 +	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
 +	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
- 		update_orig(orig_node, ethhdr, batman_packet,
++		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
 +			    if_incoming, hna_buff, hna_buff_len, is_duplicate);
 +
 +	mark_bonding_address(bat_priv, orig_node,
 +			     orig_neigh_node, batman_packet);
 +	update_bonding_candidates(bat_priv, orig_node);
 +
 +	/* is single hop (direct) neighbor */
 +	if (is_single_hop_neigh) {
 +
 +		/* mark direct link on incoming interface */
 +		schedule_forward_packet(orig_node, ethhdr, batman_packet,
 +					1, hna_buff_len, if_incoming);
 +
 +		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
 +			"rebroadcast neighbor packet with direct link flag\n");
 +		return;
 +	}
 +
 +	/* multihop originator */
 +	if (!is_bidirectional) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: not received via bidirectional link\n");
 +		return;
 +	}
 +
 +	if (is_duplicate) {
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"Drop packet: duplicate packet received\n");
 +		return;
 +	}
 +
 +	bat_dbg(DBG_BATMAN, bat_priv,
 +		"Forwarding packet: rebroadcast originator packet\n");
 +	schedule_forward_packet(orig_node, ethhdr, batman_packet,
 +				0, hna_buff_len, if_incoming);
 +}
 +
- int recv_bat_packet(struct sk_buff *skb,
- 				struct batman_if *batman_if)
++int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
 +{
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +	struct ethhdr *ethhdr;
 +	unsigned long flags;
- 	struct sk_buff *skb_old;
 +
 +	/* drop packet if it has not necessary minimum size */
- 	if (skb_headlen(skb) < sizeof(struct batman_packet))
++	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
 +		return NET_RX_DROP;
 +
 +	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* packet with broadcast indication but unicast recipient */
 +	if (!is_bcast(ethhdr->h_dest))
 +		return NET_RX_DROP;
 +
 +	/* packet with broadcast sender address */
 +	if (is_bcast(ethhdr->h_source))
 +		return NET_RX_DROP;
 +
- 	/* TODO: we use headlen instead of "length", because
- 	 * only this data is paged in. */
- 
 +	/* create a copy of the skb, if needed, to modify it. */
- 	if (!skb_clone_writable(skb, skb_headlen(skb))) {
- 		skb_old = skb;
- 		skb = skb_copy(skb, GFP_ATOMIC);
- 		if (!skb)
- 			return NET_RX_DROP;
- 		ethhdr = (struct ethhdr *)skb_mac_header(skb);
- 		kfree_skb(skb_old);
- 	}
++	if (skb_cow(skb, 0) < 0)
++		return NET_RX_DROP;
++
++	/* keep skb linear */
++	if (skb_linearize(skb) < 0)
++		return NET_RX_DROP;
++
++	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +	receive_aggr_bat_packet(ethhdr,
 +				skb->data,
 +				skb_headlen(skb),
 +				batman_if);
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	kfree_skb(skb);
 +	return NET_RX_SUCCESS;
 +}
 +
- static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
++static int recv_my_icmp_packet(struct bat_priv *bat_priv,
++			       struct sk_buff *skb, size_t icmp_len)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct orig_node *orig_node;
 +	struct icmp_packet_rr *icmp_packet;
 +	struct ethhdr *ethhdr;
- 	struct sk_buff *skb_old;
 +	struct batman_if *batman_if;
 +	int ret;
 +	unsigned long flags;
 +	uint8_t dstaddr[ETH_ALEN];
 +
 +	icmp_packet = (struct icmp_packet_rr *)skb->data;
 +	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* add data to device queue */
 +	if (icmp_packet->msg_type != ECHO_REQUEST) {
 +		bat_socket_receive_packet(icmp_packet, icmp_len);
 +		return NET_RX_DROP;
 +	}
 +
 +	if (!bat_priv->primary_if)
 +		return NET_RX_DROP;
 +
 +	/* answer echo request (ping) */
 +	/* get routing information */
- 	spin_lock_irqsave(&orig_hash_lock, flags);
- 	orig_node = ((struct orig_node *)hash_find(orig_hash,
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
 +						   icmp_packet->orig));
 +	ret = NET_RX_DROP;
 +
 +	if ((orig_node != NULL) &&
 +	    (orig_node->router != NULL)) {
 +
 +		/* don't lock while sending the packets ... we therefore
 +		 * copy the required data before sending */
 +		batman_if = orig_node->router->if_incoming;
 +		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +		/* create a copy of the skb, if needed, to modify it. */
- 		skb_old = NULL;
- 		if (!skb_clone_writable(skb, icmp_len)) {
- 			skb_old = skb;
- 			skb = skb_copy(skb, GFP_ATOMIC);
- 			if (!skb)
- 				return NET_RX_DROP;
- 			icmp_packet = (struct icmp_packet_rr *)skb->data;
- 			ethhdr = (struct ethhdr *)skb_mac_header(skb);
- 			kfree_skb(skb_old);
- 		}
++		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
++			return NET_RX_DROP;
++
++		icmp_packet = (struct icmp_packet_rr *)skb->data;
++		ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
 +		memcpy(icmp_packet->orig,
 +		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 +		icmp_packet->msg_type = ECHO_REPLY;
 +		icmp_packet->ttl = TTL;
 +
 +		send_skb_packet(skb, batman_if, dstaddr);
 +		ret = NET_RX_SUCCESS;
 +
 +	} else
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	return ret;
 +}
 +
- static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
++static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
++				  struct sk_buff *skb, size_t icmp_len)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct orig_node *orig_node;
 +	struct icmp_packet *icmp_packet;
 +	struct ethhdr *ethhdr;
- 	struct sk_buff *skb_old;
 +	struct batman_if *batman_if;
 +	int ret;
 +	unsigned long flags;
 +	uint8_t dstaddr[ETH_ALEN];
 +
 +	icmp_packet = (struct icmp_packet *)skb->data;
 +	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* send TTL exceeded if packet is an echo request (traceroute) */
 +	if (icmp_packet->msg_type != ECHO_REQUEST) {
- 		pr_warning("Warning - can't forward icmp packet from %pM to "
- 			   "%pM: ttl exceeded\n", icmp_packet->orig,
- 			   icmp_packet->dst);
++		pr_debug("Warning - can't forward icmp packet from %pM to "
++			 "%pM: ttl exceeded\n", icmp_packet->orig,
++			 icmp_packet->dst);
 +		return NET_RX_DROP;
 +	}
 +
 +	if (!bat_priv->primary_if)
 +		return NET_RX_DROP;
 +
 +	/* get routing information */
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +	orig_node = ((struct orig_node *)
- 		     hash_find(orig_hash, icmp_packet->orig));
++		     hash_find(bat_priv->orig_hash, icmp_packet->orig));
 +	ret = NET_RX_DROP;
 +
 +	if ((orig_node != NULL) &&
 +	    (orig_node->router != NULL)) {
 +
 +		/* don't lock while sending the packets ... we therefore
 +		 * copy the required data before sending */
 +		batman_if = orig_node->router->if_incoming;
 +		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +		/* create a copy of the skb, if needed, to modify it. */
- 		if (!skb_clone_writable(skb, icmp_len)) {
- 			skb_old = skb;
- 			skb = skb_copy(skb, GFP_ATOMIC);
- 			if (!skb)
- 				return NET_RX_DROP;
- 			icmp_packet = (struct icmp_packet *) skb->data;
- 			ethhdr = (struct ethhdr *)skb_mac_header(skb);
- 			kfree_skb(skb_old);
- 		}
++		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
++			return NET_RX_DROP;
++
++		icmp_packet = (struct icmp_packet *) skb->data;
++		ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
 +		memcpy(icmp_packet->orig,
 +		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 +		icmp_packet->msg_type = TTL_EXCEEDED;
 +		icmp_packet->ttl = TTL;
 +
 +		send_skb_packet(skb, batman_if, dstaddr);
 +		ret = NET_RX_SUCCESS;
 +
 +	} else
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	return ret;
 +}
 +
 +
- int recv_icmp_packet(struct sk_buff *skb)
++int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 +{
++	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 +	struct icmp_packet_rr *icmp_packet;
 +	struct ethhdr *ethhdr;
 +	struct orig_node *orig_node;
- 	struct sk_buff *skb_old;
 +	struct batman_if *batman_if;
 +	int hdr_size = sizeof(struct icmp_packet);
 +	int ret;
 +	unsigned long flags;
 +	uint8_t dstaddr[ETH_ALEN];
 +
 +	/**
 +	 * we truncate all incoming icmp packets if they don't match our size
 +	 */
- 	if (skb_headlen(skb) >= sizeof(struct icmp_packet_rr))
++	if (skb->len >= sizeof(struct icmp_packet_rr))
 +		hdr_size = sizeof(struct icmp_packet_rr);
 +
 +	/* drop packet if it has not necessary minimum size */
- 	if (skb_headlen(skb) < hdr_size)
++	if (unlikely(!pskb_may_pull(skb, hdr_size)))
 +		return NET_RX_DROP;
 +
 +	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* packet with unicast indication but broadcast recipient */
 +	if (is_bcast(ethhdr->h_dest))
 +		return NET_RX_DROP;
 +
 +	/* packet with broadcast sender address */
 +	if (is_bcast(ethhdr->h_source))
 +		return NET_RX_DROP;
 +
 +	/* not for me */
 +	if (!is_my_mac(ethhdr->h_dest))
 +		return NET_RX_DROP;
 +
 +	icmp_packet = (struct icmp_packet_rr *)skb->data;
 +
 +	/* add record route information if not full */
 +	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
 +	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
 +		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
 +			ethhdr->h_dest, ETH_ALEN);
 +		icmp_packet->rr_cur++;
 +	}
 +
 +	/* packet for me */
 +	if (is_my_mac(icmp_packet->dst))
- 		return recv_my_icmp_packet(skb, hdr_size);
++		return recv_my_icmp_packet(bat_priv, skb, hdr_size);
 +
 +	/* TTL exceeded */
 +	if (icmp_packet->ttl < 2)
- 		return recv_icmp_ttl_exceeded(skb, hdr_size);
++		return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
 +
 +	ret = NET_RX_DROP;
 +
 +	/* get routing information */
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +	orig_node = ((struct orig_node *)
- 		     hash_find(orig_hash, icmp_packet->dst));
++		     hash_find(bat_priv->orig_hash, icmp_packet->dst));
 +
 +	if ((orig_node != NULL) &&
 +	    (orig_node->router != NULL)) {
 +
 +		/* don't lock while sending the packets ... we therefore
 +		 * copy the required data before sending */
 +		batman_if = orig_node->router->if_incoming;
 +		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +		/* create a copy of the skb, if needed, to modify it. */
- 		if (!skb_clone_writable(skb, hdr_size)) {
- 			skb_old = skb;
- 			skb = skb_copy(skb, GFP_ATOMIC);
- 			if (!skb)
- 				return NET_RX_DROP;
- 			icmp_packet = (struct icmp_packet_rr *)skb->data;
- 			ethhdr = (struct ethhdr *)skb_mac_header(skb);
- 			kfree_skb(skb_old);
- 		}
++		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
++			return NET_RX_DROP;
++
++		icmp_packet = (struct icmp_packet_rr *)skb->data;
++		ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +		/* decrement ttl */
 +		icmp_packet->ttl--;
 +
 +		/* route it */
 +		send_skb_packet(skb, batman_if, dstaddr);
 +		ret = NET_RX_SUCCESS;
 +
 +	} else
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	return ret;
 +}
 +
 +/* find a suitable router for this originator, and use
 + * bonding if possible. */
 +struct neigh_node *find_router(struct orig_node *orig_node,
- 		struct batman_if *recv_if)
++			       struct batman_if *recv_if)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv;
 +	struct orig_node *primary_orig_node;
 +	struct orig_node *router_orig;
 +	struct neigh_node *router, *first_candidate, *best_router;
 +	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 +	int bonding_enabled;
 +
 +	if (!orig_node)
 +		return NULL;
 +
 +	if (!orig_node->router)
 +		return NULL;
 +
 +	/* without bonding, the first node should
 +	 * always choose the default router. */
 +
++	if (!recv_if)
++		return orig_node->router;
++
++	bat_priv = netdev_priv(recv_if->soft_iface);
 +	bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
- 	if (!bonding_enabled && (recv_if == NULL))
- 			return orig_node->router;
++
++	if (!bonding_enabled)
++		return orig_node->router;
 +
 +	router_orig = orig_node->router->orig_node;
 +
 +	/* if we have something in the primary_addr, we can search
 +	 * for a potential bonding candidate. */
 +	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
 +		return orig_node->router;
 +
 +	/* find the orig_node which has the primary interface. might
 +	 * even be the same as our router_orig in many cases */
 +
 +	if (memcmp(router_orig->primary_addr,
 +				router_orig->orig, ETH_ALEN) == 0) {
 +		primary_orig_node = router_orig;
 +	} else {
- 		primary_orig_node = hash_find(orig_hash,
++		primary_orig_node = hash_find(bat_priv->orig_hash,
 +						router_orig->primary_addr);
++
 +		if (!primary_orig_node)
 +			return orig_node->router;
 +	}
 +
 +	/* with less than 2 candidates, we can't do any
 +	 * bonding and prefer the original router. */
 +
 +	if (primary_orig_node->bond.candidates < 2)
 +		return orig_node->router;
 +
 +
 +	/* all nodes between should choose a candidate which
 +	 * is is not on the interface where the packet came
 +	 * in. */
 +	first_candidate = primary_orig_node->bond.selected;
 +	router = first_candidate;
 +
 +	if (bonding_enabled) {
 +		/* in the bonding case, send the packets in a round
 +		 * robin fashion over the remaining interfaces. */
 +		do {
 +			/* recv_if == NULL on the first node. */
 +			if (router->if_incoming != recv_if)
 +				break;
 +
 +			router = router->next_bond_candidate;
 +		} while (router != first_candidate);
 +
 +		primary_orig_node->bond.selected = router->next_bond_candidate;
 +
 +	} else {
 +		/* if bonding is disabled, use the best of the
 +		 * remaining candidates which are not using
 +		 * this interface. */
 +		best_router = first_candidate;
 +
 +		do {
 +			/* recv_if == NULL on the first node. */
 +			if ((router->if_incoming != recv_if) &&
 +				(router->tq_avg > best_router->tq_avg))
 +					best_router = router;
 +
 +			router = router->next_bond_candidate;
 +		} while (router != first_candidate);
 +
 +		router = best_router;
 +	}
 +
 +	return router;
 +}
 +
- int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
++static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
 +{
- 	struct unicast_packet *unicast_packet;
- 	struct orig_node *orig_node;
- 	struct neigh_node *router;
 +	struct ethhdr *ethhdr;
- 	struct batman_if *batman_if;
- 	struct sk_buff *skb_old;
- 	uint8_t dstaddr[ETH_ALEN];
- 	int hdr_size = sizeof(struct unicast_packet);
- 	unsigned long flags;
 +
 +	/* drop packet if it has not necessary minimum size */
- 	if (skb_headlen(skb) < hdr_size)
- 		return NET_RX_DROP;
++	if (unlikely(!pskb_may_pull(skb, hdr_size)))
++		return -1;
 +
- 	ethhdr = (struct ethhdr *) skb_mac_header(skb);
++	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* packet with unicast indication but broadcast recipient */
 +	if (is_bcast(ethhdr->h_dest))
- 		return NET_RX_DROP;
++		return -1;
 +
 +	/* packet with broadcast sender address */
 +	if (is_bcast(ethhdr->h_source))
- 		return NET_RX_DROP;
++		return -1;
 +
 +	/* not for me */
 +	if (!is_my_mac(ethhdr->h_dest))
- 		return NET_RX_DROP;
++		return -1;
++
++	return 0;
++}
++
++static int route_unicast_packet(struct sk_buff *skb,
++				struct batman_if *recv_if, int hdr_size)
++{
++	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
++	struct orig_node *orig_node;
++	struct neigh_node *router;
++	struct batman_if *batman_if;
++	uint8_t dstaddr[ETH_ALEN];
++	unsigned long flags;
++	struct unicast_packet *unicast_packet;
++	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
- 	unicast_packet = (struct unicast_packet *) skb->data;
++	unicast_packet = (struct unicast_packet *)skb->data;
 +
 +	/* packet for me */
 +	if (is_my_mac(unicast_packet->dest)) {
- 		interface_rx(skb, hdr_size);
++		interface_rx(recv_if->soft_iface, skb, hdr_size);
 +		return NET_RX_SUCCESS;
 +	}
 +
 +	/* TTL exceeded */
 +	if (unicast_packet->ttl < 2) {
- 		pr_warning("Warning - can't forward unicast packet from %pM to "
- 			   "%pM: ttl exceeded\n", ethhdr->h_source,
- 			   unicast_packet->dest);
++		pr_debug("Warning - can't forward unicast packet from %pM to "
++			 "%pM: ttl exceeded\n", ethhdr->h_source,
++			 unicast_packet->dest);
 +		return NET_RX_DROP;
 +	}
 +
 +	/* get routing information */
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +	orig_node = ((struct orig_node *)
- 		     hash_find(orig_hash, unicast_packet->dest));
++		     hash_find(bat_priv->orig_hash, unicast_packet->dest));
 +
 +	router = find_router(orig_node, recv_if);
 +
 +	if (!router) {
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +		return NET_RX_DROP;
 +	}
 +
 +	/* don't lock while sending the packets ... we therefore
 +	 * copy the required data before sending */
 +
 +	batman_if = router->if_incoming;
 +	memcpy(dstaddr, router->addr, ETH_ALEN);
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
 +	/* create a copy of the skb, if needed, to modify it. */
- 	if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
- 		skb_old = skb;
- 		skb = skb_copy(skb, GFP_ATOMIC);
- 		if (!skb)
- 			return NET_RX_DROP;
- 		unicast_packet = (struct unicast_packet *) skb->data;
- 		ethhdr = (struct ethhdr *)skb_mac_header(skb);
- 		kfree_skb(skb_old);
- 	}
++	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
++		return NET_RX_DROP;
++
++	unicast_packet = (struct unicast_packet *)skb->data;
++	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* decrement ttl */
 +	unicast_packet->ttl--;
 +
 +	/* route it */
 +	send_skb_packet(skb, batman_if, dstaddr);
 +
 +	return NET_RX_SUCCESS;
 +}
 +
- int recv_bcast_packet(struct sk_buff *skb)
++int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
++{
++	struct unicast_packet *unicast_packet;
++	int hdr_size = sizeof(struct unicast_packet);
++
++	if (check_unicast_packet(skb, hdr_size) < 0)
++		return NET_RX_DROP;
++
++	unicast_packet = (struct unicast_packet *)skb->data;
++
++	/* packet for me */
++	if (is_my_mac(unicast_packet->dest)) {
++		interface_rx(recv_if->soft_iface, skb, hdr_size);
++		return NET_RX_SUCCESS;
++	}
++
++	return route_unicast_packet(skb, recv_if, hdr_size);
++}
++
++int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
++{
++	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
++	struct unicast_frag_packet *unicast_packet;
++	struct orig_node *orig_node;
++	struct frag_packet_list_entry *tmp_frag_entry;
++	int hdr_size = sizeof(struct unicast_frag_packet);
++	unsigned long flags;
++
++	if (check_unicast_packet(skb, hdr_size) < 0)
++		return NET_RX_DROP;
++
++	unicast_packet = (struct unicast_frag_packet *)skb->data;
++
++	/* packet for me */
++	if (is_my_mac(unicast_packet->dest)) {
++
++		spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++		orig_node = ((struct orig_node *)
++			hash_find(bat_priv->orig_hash, unicast_packet->orig));
++
++		if (!orig_node) {
++			pr_debug("couldn't find orig node for fragmentation\n");
++			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
++					       flags);
++			return NET_RX_DROP;
++		}
++
++		orig_node->last_frag_packet = jiffies;
++
++		if (list_empty(&orig_node->frag_list))
++			create_frag_buffer(&orig_node->frag_list);
++
++		tmp_frag_entry =
++			search_frag_packet(&orig_node->frag_list,
++					   unicast_packet);
++
++		if (!tmp_frag_entry) {
++			create_frag_entry(&orig_node->frag_list, skb);
++			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
++					       flags);
++			return NET_RX_SUCCESS;
++		}
++
++		skb = merge_frag_packet(&orig_node->frag_list,
++					tmp_frag_entry, skb);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
++		if (!skb)
++			return NET_RX_DROP;
++
++		interface_rx(recv_if->soft_iface, skb, hdr_size);
++		return NET_RX_SUCCESS;
++	}
++
++	return route_unicast_packet(skb, recv_if, hdr_size);
++}
++
++
++int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
 +{
++	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 +	struct orig_node *orig_node;
 +	struct bcast_packet *bcast_packet;
 +	struct ethhdr *ethhdr;
 +	int hdr_size = sizeof(struct bcast_packet);
 +	int32_t seq_diff;
 +	unsigned long flags;
 +
 +	/* drop packet if it has not necessary minimum size */
- 	if (skb_headlen(skb) < hdr_size)
++	if (unlikely(!pskb_may_pull(skb, hdr_size)))
 +		return NET_RX_DROP;
 +
 +	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* packet with broadcast indication but unicast recipient */
 +	if (!is_bcast(ethhdr->h_dest))
 +		return NET_RX_DROP;
 +
 +	/* packet with broadcast sender address */
 +	if (is_bcast(ethhdr->h_source))
 +		return NET_RX_DROP;
 +
 +	/* ignore broadcasts sent by myself */
 +	if (is_my_mac(ethhdr->h_source))
 +		return NET_RX_DROP;
 +
 +	bcast_packet = (struct bcast_packet *)skb->data;
 +
 +	/* ignore broadcasts originated by myself */
 +	if (is_my_mac(bcast_packet->orig))
 +		return NET_RX_DROP;
 +
 +	if (bcast_packet->ttl < 2)
 +		return NET_RX_DROP;
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +	orig_node = ((struct orig_node *)
- 		     hash_find(orig_hash, bcast_packet->orig));
++		     hash_find(bat_priv->orig_hash, bcast_packet->orig));
 +
 +	if (orig_node == NULL) {
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +		return NET_RX_DROP;
 +	}
 +
 +	/* check whether the packet is a duplicate */
 +	if (get_bit_status(orig_node->bcast_bits,
 +			   orig_node->last_bcast_seqno,
 +			   ntohl(bcast_packet->seqno))) {
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +		return NET_RX_DROP;
 +	}
 +
 +	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
 +
 +	/* check whether the packet is old and the host just restarted. */
- 	if (window_protected(seq_diff, &orig_node->bcast_seqno_reset)) {
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++	if (window_protected(bat_priv, seq_diff,
++			     &orig_node->bcast_seqno_reset)) {
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +		return NET_RX_DROP;
 +	}
 +
 +	/* mark broadcast in flood history, update window position
 +	 * if required. */
- 	if (bit_get_packet(orig_node->bcast_bits, seq_diff, 1))
++	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
 +		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +	/* rebroadcast packet */
- 	add_bcast_packet_to_list(skb);
++	add_bcast_packet_to_list(bat_priv, skb);
 +
 +	/* broadcast for me */
- 	interface_rx(skb, hdr_size);
++	interface_rx(recv_if->soft_iface, skb, hdr_size);
 +
 +	return NET_RX_SUCCESS;
 +}
 +
- int recv_vis_packet(struct sk_buff *skb)
++int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
 +{
 +	struct vis_packet *vis_packet;
 +	struct ethhdr *ethhdr;
- 	struct bat_priv *bat_priv;
++	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 +	int hdr_size = sizeof(struct vis_packet);
 +
- 	if (skb_headlen(skb) < hdr_size)
++	/* keep skb linear */
++	if (skb_linearize(skb) < 0)
 +		return NET_RX_DROP;
 +
- 	vis_packet = (struct vis_packet *) skb->data;
++	if (unlikely(!pskb_may_pull(skb, hdr_size)))
++		return NET_RX_DROP;
++
++	vis_packet = (struct vis_packet *)skb->data;
 +	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 +
 +	/* not for me */
 +	if (!is_my_mac(ethhdr->h_dest))
 +		return NET_RX_DROP;
 +
 +	/* ignore own packets */
 +	if (is_my_mac(vis_packet->vis_orig))
 +		return NET_RX_DROP;
 +
 +	if (is_my_mac(vis_packet->sender_orig))
 +		return NET_RX_DROP;
 +
- 	/* FIXME: each batman_if will be attached to a softif */
- 	bat_priv = netdev_priv(soft_device);
- 
 +	switch (vis_packet->vis_type) {
 +	case VIS_TYPE_SERVER_SYNC:
- 		/* TODO: handle fragmented skbs properly */
 +		receive_server_sync_packet(bat_priv, vis_packet,
 +					   skb_headlen(skb));
 +		break;
 +
 +	case VIS_TYPE_CLIENT_UPDATE:
- 		/* TODO: handle fragmented skbs properly */
 +		receive_client_update_packet(bat_priv, vis_packet,
 +					     skb_headlen(skb));
 +		break;
 +
 +	default:	/* ignore unknown packet */
 +		break;
 +	}
 +
 +	/* We take a copy of the data in the packet, so we should
 +	   always free the skbuf. */
 +	return NET_RX_DROP;
 +}
diff --combined drivers/staging/batman-adv/routing.h
index 3eac64e,06ea99d..06ea99d
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@@ -29,15 -29,15 +29,15 @@@ void receive_bat_packet(struct ethhdr *
  				struct batman_packet *batman_packet,
  				unsigned char *hna_buff, int hna_buff_len,
  				struct batman_if *if_incoming);
- void update_routes(struct orig_node *orig_node,
- 				struct neigh_node *neigh_node,
- 				unsigned char *hna_buff, int hna_buff_len);
- int recv_icmp_packet(struct sk_buff *skb);
+ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ 		   struct neigh_node *neigh_node, unsigned char *hna_buff,
+ 		   int hna_buff_len);
+ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
  int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
- int recv_bcast_packet(struct sk_buff *skb);
- int recv_vis_packet(struct sk_buff *skb);
- int recv_bat_packet(struct sk_buff *skb,
- 				struct batman_if *batman_if);
+ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
+ int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
+ int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
+ int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
  struct neigh_node *find_router(struct orig_node *orig_node,
  		struct batman_if *recv_if);
  void update_bonding_candidates(struct bat_priv *bat_priv,
diff --combined drivers/staging/batman-adv/send.c
index 055edee,0000000..5d57ef5
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@@ -1,585 -1,0 +1,577 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "send.h"
 +#include "routing.h"
 +#include "translation-table.h"
 +#include "soft-interface.h"
 +#include "hard-interface.h"
 +#include "types.h"
 +#include "vis.h"
 +#include "aggregation.h"
 +
- #include <linux/netfilter_bridge.h>
 +
 +static void send_outstanding_bcast_packet(struct work_struct *work);
 +
 +/* apply hop penalty for a normal link */
 +static uint8_t hop_penalty(const uint8_t tq)
 +{
 +	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
 +}
 +
 +/* when do we schedule our own packet to be sent */
 +static unsigned long own_send_time(struct bat_priv *bat_priv)
 +{
 +	return jiffies + msecs_to_jiffies(
 +		   atomic_read(&bat_priv->orig_interval) -
 +		   JITTER + (random32() % 2*JITTER));
 +}
 +
 +/* when do we schedule a forwarded packet to be sent */
 +static unsigned long forward_send_time(struct bat_priv *bat_priv)
 +{
 +	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
 +}
 +
 +/* send out an already prepared packet to the given address via the
 + * specified batman interface */
 +int send_skb_packet(struct sk_buff *skb,
 +				struct batman_if *batman_if,
 +				uint8_t *dst_addr)
 +{
 +	struct ethhdr *ethhdr;
 +
 +	if (batman_if->if_status != IF_ACTIVE)
 +		goto send_skb_err;
 +
 +	if (unlikely(!batman_if->net_dev))
 +		goto send_skb_err;
 +
 +	if (!(batman_if->net_dev->flags & IFF_UP)) {
 +		pr_warning("Interface %s is not up - can't send packet via "
- 			   "that interface!\n", batman_if->dev);
++			   "that interface!\n", batman_if->net_dev->name);
 +		goto send_skb_err;
 +	}
 +
 +	/* push to the ethernet header. */
- 	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
++	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
 +		goto send_skb_err;
 +
 +	skb_reset_mac_header(skb);
 +
 +	ethhdr = (struct ethhdr *) skb_mac_header(skb);
 +	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
 +	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
 +	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
 +
 +	skb_set_network_header(skb, ETH_HLEN);
 +	skb->priority = TC_PRIO_CONTROL;
 +	skb->protocol = __constant_htons(ETH_P_BATMAN);
 +
 +	skb->dev = batman_if->net_dev;
 +
 +	/* dev_queue_xmit() returns a negative result on error.	 However on
 +	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
- 	 * (which is > 0). This will not be treated as an error.
- 	 * Also, if netfilter/ebtables wants to block outgoing batman
- 	 * packets then giving them a chance to do so here */
++	 * (which is > 0). This will not be treated as an error. */
 +
- 	return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
- 		       dev_queue_xmit);
++	return dev_queue_xmit(skb);
 +send_skb_err:
 +	kfree_skb(skb);
 +	return NET_XMIT_DROP;
 +}
 +
- /* sends a raw packet. */
- void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
- 		     struct batman_if *batman_if, uint8_t *dst_addr)
- {
- 	struct sk_buff *skb;
- 	char *data;
- 
- 	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
- 	if (!skb)
- 		return;
- 	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
- 	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
- 	/* pull back to the batman "network header" */
- 	skb_pull(skb, sizeof(struct ethhdr));
- 	send_skb_packet(skb, batman_if, dst_addr);
- }
- 
 +/* Send a packet to a given interface */
 +static void send_packet_to_if(struct forw_packet *forw_packet,
 +			      struct batman_if *batman_if)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +	char *fwd_str;
 +	uint8_t packet_num;
 +	int16_t buff_pos;
 +	struct batman_packet *batman_packet;
++	struct sk_buff *skb;
 +
 +	if (batman_if->if_status != IF_ACTIVE)
 +		return;
 +
 +	packet_num = 0;
 +	buff_pos = 0;
- 	batman_packet = (struct batman_packet *)
- 		(forw_packet->packet_buff);
++	batman_packet = (struct batman_packet *)forw_packet->skb->data;
 +
 +	/* adjust all flags and log packets */
 +	while (aggregated_packet(buff_pos,
 +				 forw_packet->packet_len,
 +				 batman_packet->num_hna)) {
 +
 +		/* we might have aggregated direct link packets with an
 +		 * ordinary base packet */
 +		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
 +		    (forw_packet->if_incoming == batman_if))
 +			batman_packet->flags |= DIRECTLINK;
 +		else
 +			batman_packet->flags &= ~DIRECTLINK;
 +
 +		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
 +							    "Sending own" :
 +							    "Forwarding"));
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
 +			" IDF %s) on interface %s [%s]\n",
 +			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 +			batman_packet->orig, ntohl(batman_packet->seqno),
 +			batman_packet->tq, batman_packet->ttl,
 +			(batman_packet->flags & DIRECTLINK ?
 +			 "on" : "off"),
- 			batman_if->dev, batman_if->addr_str);
++			batman_if->net_dev->name, batman_if->addr_str);
 +
 +		buff_pos += sizeof(struct batman_packet) +
 +			(batman_packet->num_hna * ETH_ALEN);
 +		packet_num++;
 +		batman_packet = (struct batman_packet *)
- 			(forw_packet->packet_buff + buff_pos);
++			(forw_packet->skb->data + buff_pos);
 +	}
 +
- 	send_raw_packet(forw_packet->packet_buff,
- 			forw_packet->packet_len,
- 			batman_if, broadcast_addr);
++	/* create clone because function is called more than once */
++	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
++	if (skb)
++		send_skb_packet(skb, batman_if, broadcast_addr);
 +}
 +
 +/* send a batman packet */
 +static void send_packet(struct forw_packet *forw_packet)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct batman_if *batman_if;
++	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 +	struct batman_packet *batman_packet =
- 		(struct batman_packet *)(forw_packet->packet_buff);
++		(struct batman_packet *)(forw_packet->skb->data);
 +	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 +
 +	if (!forw_packet->if_incoming) {
 +		pr_err("Error - can't forward packet: incoming iface not "
 +		       "specified\n");
 +		return;
 +	}
 +
 +	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
 +		return;
 +
 +	/* multihomed peer assumed */
 +	/* non-primary OGMs are only broadcasted on their interface */
 +	if ((directlink && (batman_packet->ttl == 1)) ||
 +	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
 +
 +		/* FIXME: what about aggregated packets ? */
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"%s packet (originator %pM, seqno %d, TTL %d) "
 +			"on interface %s [%s]\n",
 +			(forw_packet->own ? "Sending own" : "Forwarding"),
 +			batman_packet->orig, ntohl(batman_packet->seqno),
- 			batman_packet->ttl, forw_packet->if_incoming->dev,
++			batman_packet->ttl,
++			forw_packet->if_incoming->net_dev->name,
 +			forw_packet->if_incoming->addr_str);
 +
- 		send_raw_packet(forw_packet->packet_buff,
- 				forw_packet->packet_len,
- 				forw_packet->if_incoming,
++		/* skb is only used once and than forw_packet is free'd */
++		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
 +				broadcast_addr);
++		forw_packet->skb = NULL;
++
 +		return;
 +	}
 +
 +	/* broadcast on every interface */
 +	rcu_read_lock();
- 	list_for_each_entry_rcu(batman_if, &if_list, list)
++	list_for_each_entry_rcu(batman_if, &if_list, list) {
++		if (batman_if->soft_iface != soft_iface)
++			continue;
++
 +		send_packet_to_if(forw_packet, batman_if);
++	}
 +	rcu_read_unlock();
 +}
 +
- static void rebuild_batman_packet(struct batman_if *batman_if)
++static void rebuild_batman_packet(struct bat_priv *bat_priv,
++				  struct batman_if *batman_if)
 +{
 +	int new_len;
 +	unsigned char *new_buff;
 +	struct batman_packet *batman_packet;
 +
- 	new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
++	new_len = sizeof(struct batman_packet) +
++			(bat_priv->num_local_hna * ETH_ALEN);
 +	new_buff = kmalloc(new_len, GFP_ATOMIC);
 +
 +	/* keep old buffer if kmalloc should fail */
 +	if (new_buff) {
 +		memcpy(new_buff, batman_if->packet_buff,
 +		       sizeof(struct batman_packet));
 +		batman_packet = (struct batman_packet *)new_buff;
 +
- 		batman_packet->num_hna = hna_local_fill_buffer(
- 			new_buff + sizeof(struct batman_packet),
- 			new_len - sizeof(struct batman_packet));
++		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
++				new_buff + sizeof(struct batman_packet),
++				new_len - sizeof(struct batman_packet));
 +
 +		kfree(batman_if->packet_buff);
 +		batman_if->packet_buff = new_buff;
 +		batman_if->packet_len = new_len;
 +	}
 +}
 +
 +void schedule_own_packet(struct batman_if *batman_if)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
 +	unsigned long send_time;
 +	struct batman_packet *batman_packet;
 +	int vis_server;
 +
 +	if ((batman_if->if_status == IF_NOT_IN_USE) ||
 +	    (batman_if->if_status == IF_TO_BE_REMOVED))
 +		return;
 +
 +	vis_server = atomic_read(&bat_priv->vis_mode);
 +
 +	/**
 +	 * the interface gets activated here to avoid race conditions between
 +	 * the moment of activating the interface in
 +	 * hardif_activate_interface() where the originator mac is set and
 +	 * outdated packets (especially uninitialized mac addresses) in the
 +	 * packet queue
 +	 */
 +	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
 +		batman_if->if_status = IF_ACTIVE;
 +
 +	/* if local hna has changed and interface is a primary interface */
- 	if ((atomic_read(&hna_local_changed)) &&
++	if ((atomic_read(&bat_priv->hna_local_changed)) &&
 +	    (batman_if == bat_priv->primary_if))
- 		rebuild_batman_packet(batman_if);
++		rebuild_batman_packet(bat_priv, batman_if);
 +
 +	/**
 +	 * NOTE: packet_buff might just have been re-allocated in
 +	 * rebuild_batman_packet()
 +	 */
 +	batman_packet = (struct batman_packet *)batman_if->packet_buff;
 +
 +	/* change sequence number to network order */
 +	batman_packet->seqno =
 +		htonl((uint32_t)atomic_read(&batman_if->seqno));
 +
 +	if (vis_server == VIS_TYPE_SERVER_SYNC)
 +		batman_packet->flags |= VIS_SERVER;
 +	else
 +		batman_packet->flags &= ~VIS_SERVER;
 +
 +	atomic_inc(&batman_if->seqno);
 +
 +	slide_own_bcast_window(batman_if);
 +	send_time = own_send_time(bat_priv);
 +	add_bat_packet_to_list(bat_priv,
 +			       batman_if->packet_buff,
 +			       batman_if->packet_len,
 +			       batman_if, 1, send_time);
 +}
 +
 +void schedule_forward_packet(struct orig_node *orig_node,
 +			     struct ethhdr *ethhdr,
 +			     struct batman_packet *batman_packet,
 +			     uint8_t directlink, int hna_buff_len,
 +			     struct batman_if *if_incoming)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 +	unsigned char in_tq, in_ttl, tq_avg = 0;
 +	unsigned long send_time;
 +
 +	if (batman_packet->ttl <= 1) {
 +		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
 +		return;
 +	}
 +
 +	in_tq = batman_packet->tq;
 +	in_ttl = batman_packet->ttl;
 +
 +	batman_packet->ttl--;
 +	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
 +
 +	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
 +	 * of our best tq value */
 +	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
 +
 +		/* rebroadcast ogm of best ranking neighbor as is */
 +		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
 +			batman_packet->tq = orig_node->router->tq_avg;
 +
 +			if (orig_node->router->last_ttl)
 +				batman_packet->ttl = orig_node->router->last_ttl
 +							- 1;
 +		}
 +
 +		tq_avg = orig_node->router->tq_avg;
 +	}
 +
 +	/* apply hop penalty */
 +	batman_packet->tq = hop_penalty(batman_packet->tq);
 +
 +	bat_dbg(DBG_BATMAN, bat_priv,
 +		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
 +		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
 +		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
 +		batman_packet->ttl);
 +
 +	batman_packet->seqno = htonl(batman_packet->seqno);
 +
 +	/* switch off primaries first hop flag when forwarding */
 +	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
 +	if (directlink)
 +		batman_packet->flags |= DIRECTLINK;
 +	else
 +		batman_packet->flags &= ~DIRECTLINK;
 +
 +	send_time = forward_send_time(bat_priv);
 +	add_bat_packet_to_list(bat_priv,
 +			       (unsigned char *)batman_packet,
 +			       sizeof(struct batman_packet) + hna_buff_len,
 +			       if_incoming, 0, send_time);
 +}
 +
 +static void forw_packet_free(struct forw_packet *forw_packet)
 +{
 +	if (forw_packet->skb)
 +		kfree_skb(forw_packet->skb);
- 	kfree(forw_packet->packet_buff);
 +	kfree(forw_packet);
 +}
 +
- static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
++static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
++				      struct forw_packet *forw_packet,
 +				      unsigned long send_time)
 +{
 +	unsigned long flags;
 +	INIT_HLIST_NODE(&forw_packet->list);
 +
 +	/* add new packet to packet list */
- 	spin_lock_irqsave(&forw_bcast_list_lock, flags);
- 	hlist_add_head(&forw_packet->list, &forw_bcast_list);
- 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
++	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
++	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
 +
 +	/* start timer for this packet */
 +	INIT_DELAYED_WORK(&forw_packet->delayed_work,
 +			  send_outstanding_bcast_packet);
 +	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
 +			   send_time);
 +}
 +
 +#define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
 +/* add a broadcast packet to the queue and setup timers. broadcast packets
 + * are sent multiple times to increase probability for being received.
 + *
 + * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 + * errors.
 + *
 + * The skb is not consumed, so the caller should make sure that the
 + * skb is freed. */
- int add_bcast_packet_to_list(struct sk_buff *skb)
++int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 +{
 +	struct forw_packet *forw_packet;
 +	struct bcast_packet *bcast_packet;
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +
- 	if (!atomic_dec_not_zero(&bcast_queue_left)) {
++	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
 +		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
 +		goto out;
 +	}
 +
++	if (!bat_priv->primary_if)
++		goto out;
++
 +	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
 +
 +	if (!forw_packet)
 +		goto out_and_inc;
 +
 +	skb = skb_copy(skb, GFP_ATOMIC);
 +	if (!skb)
 +		goto packet_free;
 +
 +	/* as we have a copy now, it is safe to decrease the TTL */
 +	bcast_packet = (struct bcast_packet *)skb->data;
 +	bcast_packet->ttl--;
 +
 +	skb_reset_mac_header(skb);
 +
 +	forw_packet->skb = skb;
- 	forw_packet->packet_buff = NULL;
++	forw_packet->if_incoming = bat_priv->primary_if;
 +
 +	/* how often did we send the bcast packet ? */
 +	forw_packet->num_packets = 0;
 +
- 	_add_bcast_packet_to_list(forw_packet, 1);
++	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
 +	return NETDEV_TX_OK;
 +
 +packet_free:
 +	kfree(forw_packet);
 +out_and_inc:
- 	atomic_inc(&bcast_queue_left);
++	atomic_inc(&bat_priv->bcast_queue_left);
 +out:
 +	return NETDEV_TX_BUSY;
 +}
 +
 +static void send_outstanding_bcast_packet(struct work_struct *work)
 +{
 +	struct batman_if *batman_if;
 +	struct delayed_work *delayed_work =
 +		container_of(work, struct delayed_work, work);
 +	struct forw_packet *forw_packet =
 +		container_of(delayed_work, struct forw_packet, delayed_work);
 +	unsigned long flags;
 +	struct sk_buff *skb1;
++	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 +
- 	spin_lock_irqsave(&forw_bcast_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
 +	hlist_del(&forw_packet->list);
- 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
 +
- 	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
++	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
 +		goto out;
 +
 +	/* rebroadcast packet */
 +	rcu_read_lock();
 +	list_for_each_entry_rcu(batman_if, &if_list, list) {
++		if (batman_if->soft_iface != soft_iface)
++			continue;
++
 +		/* send a copy of the saved skb */
- 		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
++		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 +		if (skb1)
- 			send_skb_packet(skb1,
- 				batman_if, broadcast_addr);
++			send_skb_packet(skb1, batman_if, broadcast_addr);
 +	}
 +	rcu_read_unlock();
 +
 +	forw_packet->num_packets++;
 +
 +	/* if we still have some more bcasts to send */
 +	if (forw_packet->num_packets < 3) {
- 		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
++		_add_bcast_packet_to_list(bat_priv, forw_packet,
++					  ((5 * HZ) / 1000));
 +		return;
 +	}
 +
 +out:
 +	forw_packet_free(forw_packet);
- 	atomic_inc(&bcast_queue_left);
++	atomic_inc(&bat_priv->bcast_queue_left);
 +}
 +
 +void send_outstanding_bat_packet(struct work_struct *work)
 +{
 +	struct delayed_work *delayed_work =
 +		container_of(work, struct delayed_work, work);
 +	struct forw_packet *forw_packet =
 +		container_of(delayed_work, struct forw_packet, delayed_work);
 +	unsigned long flags;
++	struct bat_priv *bat_priv;
 +
- 	spin_lock_irqsave(&forw_bat_list_lock, flags);
++	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
++	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
 +	hlist_del(&forw_packet->list);
- 	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
 +
- 	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
++	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
 +		goto out;
 +
 +	send_packet(forw_packet);
 +
 +	/**
 +	 * we have to have at least one packet in the queue
 +	 * to determine the queues wake up time unless we are
 +	 * shutting down
 +	 */
 +	if (forw_packet->own)
 +		schedule_own_packet(forw_packet->if_incoming);
 +
 +out:
 +	/* don't count own packet */
 +	if (!forw_packet->own)
- 		atomic_inc(&batman_queue_left);
++		atomic_inc(&bat_priv->batman_queue_left);
 +
 +	forw_packet_free(forw_packet);
 +}
 +
- void purge_outstanding_packets(struct batman_if *batman_if)
++void purge_outstanding_packets(struct bat_priv *bat_priv,
++			       struct batman_if *batman_if)
 +{
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct forw_packet *forw_packet;
 +	struct hlist_node *tmp_node, *safe_tmp_node;
 +	unsigned long flags;
 +
 +	if (batman_if)
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"purge_outstanding_packets(): %s\n",
- 			batman_if->dev);
++			batman_if->net_dev->name);
 +	else
 +		bat_dbg(DBG_BATMAN, bat_priv,
 +			"purge_outstanding_packets()\n");
 +
 +	/* free bcast list */
- 	spin_lock_irqsave(&forw_bcast_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
 +	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- 				  &forw_bcast_list, list) {
++				  &bat_priv->forw_bcast_list, list) {
 +
 +		/**
 +		 * if purge_outstanding_packets() was called with an argument
 +		 * we delete only packets belonging to the given interface
 +		 */
 +		if ((batman_if) &&
 +		    (forw_packet->if_incoming != batman_if))
 +			continue;
 +
- 		spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
 +
 +		/**
 +		 * send_outstanding_bcast_packet() will lock the list to
 +		 * delete the item from the list
 +		 */
 +		cancel_delayed_work_sync(&forw_packet->delayed_work);
- 		spin_lock_irqsave(&forw_bcast_list_lock, flags);
++		spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
 +	}
- 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
 +
 +	/* free batman packet list */
- 	spin_lock_irqsave(&forw_bat_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
 +	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- 				  &forw_bat_list, list) {
++				  &bat_priv->forw_bat_list, list) {
 +
 +		/**
 +		 * if purge_outstanding_packets() was called with an argument
 +		 * we delete only packets belonging to the given interface
 +		 */
 +		if ((batman_if) &&
 +		    (forw_packet->if_incoming != batman_if))
 +			continue;
 +
- 		spin_unlock_irqrestore(&forw_bat_list_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
 +
 +		/**
 +		 * send_outstanding_bat_packet() will lock the list to
 +		 * delete the item from the list
 +		 */
 +		cancel_delayed_work_sync(&forw_packet->delayed_work);
- 		spin_lock_irqsave(&forw_bat_list_lock, flags);
++		spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
 +	}
- 	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
 +}
diff --combined drivers/staging/batman-adv/send.h
index b64c627,c4cefa8..c4cefa8
--- a/drivers/staging/batman-adv/send.h
+++ b/drivers/staging/batman-adv/send.h
@@@ -27,16 -27,15 +27,15 @@@
  int send_skb_packet(struct sk_buff *skb,
  				struct batman_if *batman_if,
  				uint8_t *dst_addr);
- void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
- 		     struct batman_if *batman_if, uint8_t *dst_addr);
  void schedule_own_packet(struct batman_if *batman_if);
  void schedule_forward_packet(struct orig_node *orig_node,
  			     struct ethhdr *ethhdr,
  			     struct batman_packet *batman_packet,
  			     uint8_t directlink, int hna_buff_len,
  			     struct batman_if *if_outgoing);
- int  add_bcast_packet_to_list(struct sk_buff *skb);
+ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
  void send_outstanding_bat_packet(struct work_struct *work);
- void purge_outstanding_packets(struct batman_if *batman_if);
+ void purge_outstanding_packets(struct bat_priv *bat_priv,
+ 			       struct batman_if *batman_if);
  
  #endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --combined drivers/staging/batman-adv/soft-interface.c
index 2ea97de,0000000..15a7c1e
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@@ -1,361 -1,0 +1,393 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "soft-interface.h"
 +#include "hard-interface.h"
 +#include "routing.h"
 +#include "send.h"
++#include "bat_debugfs.h"
 +#include "translation-table.h"
 +#include "types.h"
 +#include "hash.h"
++#include "send.h"
 +#include <linux/slab.h>
 +#include <linux/ethtool.h>
 +#include <linux/etherdevice.h>
++#include "unicast.h"
 +
- static uint32_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
- 				  * broadcast storms */
- static int32_t skb_packets;
- static int32_t skb_bad_packets;
 +
- unsigned char main_if_addr[ETH_ALEN];
 +static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
 +static void bat_get_drvinfo(struct net_device *dev,
 +			    struct ethtool_drvinfo *info);
 +static u32 bat_get_msglevel(struct net_device *dev);
 +static void bat_set_msglevel(struct net_device *dev, u32 value);
 +static u32 bat_get_link(struct net_device *dev);
 +static u32 bat_get_rx_csum(struct net_device *dev);
 +static int bat_set_rx_csum(struct net_device *dev, u32 data);
 +
 +static const struct ethtool_ops bat_ethtool_ops = {
 +	.get_settings = bat_get_settings,
 +	.get_drvinfo = bat_get_drvinfo,
 +	.get_msglevel = bat_get_msglevel,
 +	.set_msglevel = bat_set_msglevel,
 +	.get_link = bat_get_link,
 +	.get_rx_csum = bat_get_rx_csum,
 +	.set_rx_csum = bat_set_rx_csum
 +};
 +
- void set_main_if_addr(uint8_t *addr)
++int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 +{
- 	memcpy(main_if_addr, addr, ETH_ALEN);
- }
- 
- int my_skb_push(struct sk_buff *skb, unsigned int len)
- {
- 	int result = 0;
- 
- 	skb_packets++;
- 	if (skb_headroom(skb) < len) {
- 		skb_bad_packets++;
- 		result = pskb_expand_head(skb, len, 0, GFP_ATOMIC);
- 
- 		if (result < 0)
- 			return result;
- 	}
++	int result;
++
++	/**
++	 * TODO: We must check if we can release all references to non-payload
++	 * data using skb_header_release in our skbs to allow skb_cow_header to
++	 * work optimally. This means that those skbs are not allowed to read
++	 * or write any data which is before the current position of skb->data
++	 * after that call and thus allow other skbs with the same data buffer
++	 * to write freely in that area.
++	 */
++	result = skb_cow_head(skb, len);
++	if (result < 0)
++		return result;
 +
 +	skb_push(skb, len);
 +	return 0;
 +}
 +
 +static int interface_open(struct net_device *dev)
 +{
 +	netif_start_queue(dev);
 +	return 0;
 +}
 +
 +static int interface_release(struct net_device *dev)
 +{
 +	netif_stop_queue(dev);
 +	return 0;
 +}
 +
 +static struct net_device_stats *interface_stats(struct net_device *dev)
 +{
- 	struct bat_priv *priv = netdev_priv(dev);
- 	return &priv->stats;
++	struct bat_priv *bat_priv = netdev_priv(dev);
++	return &bat_priv->stats;
 +}
 +
 +static int interface_set_mac_addr(struct net_device *dev, void *p)
 +{
++	struct bat_priv *bat_priv = netdev_priv(dev);
 +	struct sockaddr *addr = p;
 +
 +	if (!is_valid_ether_addr(addr->sa_data))
 +		return -EADDRNOTAVAIL;
 +
 +	/* only modify hna-table if it has been initialised before */
- 	if (atomic_read(&module_state) == MODULE_ACTIVE) {
- 		hna_local_remove(dev->dev_addr, "mac address changed");
- 		hna_local_add(addr->sa_data);
++	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
++		hna_local_remove(bat_priv, dev->dev_addr,
++				 "mac address changed");
++		hna_local_add(dev, addr->sa_data);
 +	}
 +
 +	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 +
 +	return 0;
 +}
 +
 +static int interface_change_mtu(struct net_device *dev, int new_mtu)
 +{
 +	/* check ranges */
- 	if ((new_mtu < 68) || (new_mtu > hardif_min_mtu()))
++	if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
 +		return -EINVAL;
 +
 +	dev->mtu = new_mtu;
 +
 +	return 0;
 +}
 +
- int interface_tx(struct sk_buff *skb, struct net_device *dev)
++int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 +{
- 	struct unicast_packet *unicast_packet;
- 	struct bcast_packet *bcast_packet;
- 	struct orig_node *orig_node;
- 	struct neigh_node *router;
 +	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- 	struct bat_priv *priv = netdev_priv(dev);
- 	struct batman_if *batman_if;
- 	struct bat_priv *bat_priv;
- 	uint8_t dstaddr[6];
- 	int data_len = skb->len;
- 	unsigned long flags;
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
++	struct bcast_packet *bcast_packet;
++	int data_len = skb->len, ret;
 +
- 	if (atomic_read(&module_state) != MODULE_ACTIVE)
++	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 +		goto dropped;
 +
- 	/* FIXME: each batman_if will be attached to a softif */
- 	bat_priv = netdev_priv(soft_device);
++	soft_iface->trans_start = jiffies;
 +
- 	dev->trans_start = jiffies;
 +	/* TODO: check this for locks */
- 	hna_local_add(ethhdr->h_source);
++	hna_local_add(soft_iface, ethhdr->h_source);
 +
 +	/* ethernet packet should be broadcasted */
 +	if (is_bcast(ethhdr->h_dest) || is_mcast(ethhdr->h_dest)) {
++		if (!bat_priv->primary_if)
++			goto dropped;
 +
- 		if (my_skb_push(skb, sizeof(struct bcast_packet)) < 0)
++		if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
 +			goto dropped;
 +
 +		bcast_packet = (struct bcast_packet *)skb->data;
 +		bcast_packet->version = COMPAT_VERSION;
 +		bcast_packet->ttl = TTL;
 +
 +		/* batman packet type: broadcast */
 +		bcast_packet->packet_type = BAT_BCAST;
 +
 +		/* hw address of first interface is the orig mac because only
 +		 * this mac is known throughout the mesh */
- 		memcpy(bcast_packet->orig, main_if_addr, ETH_ALEN);
++		memcpy(bcast_packet->orig,
++		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 +
 +		/* set broadcast sequence number */
- 		bcast_packet->seqno = htonl(bcast_seqno);
++		bcast_packet->seqno =
++			htonl(atomic_inc_return(&bat_priv->bcast_seqno));
 +
- 		/* broadcast packet. on success, increase seqno. */
- 		if (add_bcast_packet_to_list(skb) == NETDEV_TX_OK)
- 			bcast_seqno++;
++		add_bcast_packet_to_list(bat_priv, skb);
 +
 +		/* a copy is stored in the bcast list, therefore removing
 +		 * the original skb. */
 +		kfree_skb(skb);
 +
 +	/* unicast packet */
 +	} else {
- 		spin_lock_irqsave(&orig_hash_lock, flags);
- 		/* get routing information */
- 		orig_node = ((struct orig_node *)hash_find(orig_hash,
- 							   ethhdr->h_dest));
- 
- 		/* check for hna host */
- 		if (!orig_node)
- 			orig_node = transtable_search(ethhdr->h_dest);
- 
- 		router = find_router(orig_node, NULL);
- 
- 		if (!router)
- 			goto unlock;
- 
- 		/* don't lock while sending the packets ... we therefore
- 		 * copy the required data before sending */
- 
- 		batman_if = router->if_incoming;
- 		memcpy(dstaddr, router->addr, ETH_ALEN);
- 
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
- 
- 		if (batman_if->if_status != IF_ACTIVE)
++		ret = unicast_send_skb(skb, bat_priv);
++		if (ret != 0)
 +			goto dropped;
- 
- 		if (my_skb_push(skb, sizeof(struct unicast_packet)) < 0)
- 			goto dropped;
- 
- 		unicast_packet = (struct unicast_packet *)skb->data;
- 
- 		unicast_packet->version = COMPAT_VERSION;
- 		/* batman packet type: unicast */
- 		unicast_packet->packet_type = BAT_UNICAST;
- 		/* set unicast ttl */
- 		unicast_packet->ttl = TTL;
- 		/* copy the destination for faster routing */
- 		memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
- 
- 		send_skb_packet(skb, batman_if, dstaddr);
 +	}
 +
- 	priv->stats.tx_packets++;
- 	priv->stats.tx_bytes += data_len;
++	bat_priv->stats.tx_packets++;
++	bat_priv->stats.tx_bytes += data_len;
 +	goto end;
 +
- unlock:
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
 +dropped:
- 	priv->stats.tx_dropped++;
++	bat_priv->stats.tx_dropped++;
 +	kfree_skb(skb);
 +end:
 +	return NETDEV_TX_OK;
 +}
 +
- void interface_rx(struct sk_buff *skb, int hdr_size)
++void interface_rx(struct net_device *soft_iface,
++		  struct sk_buff *skb, int hdr_size)
 +{
- 	struct net_device *dev = soft_device;
- 	struct bat_priv *priv = netdev_priv(dev);
++	struct bat_priv *priv = netdev_priv(soft_iface);
 +
 +	/* check if enough space is available for pulling, and pull */
 +	if (!pskb_may_pull(skb, hdr_size)) {
 +		kfree_skb(skb);
 +		return;
 +	}
 +	skb_pull_rcsum(skb, hdr_size);
 +/*	skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
 +
- 	skb->dev = dev;
- 	skb->protocol = eth_type_trans(skb, dev);
++	skb->dev = soft_iface;
++	skb->protocol = eth_type_trans(skb, soft_iface);
 +
 +	/* should not be necessary anymore as we use skb_pull_rcsum()
 +	 * TODO: please verify this and remove this TODO
 +	 * -- Dec 21st 2009, Simon Wunderlich */
 +
 +/*	skb->ip_summed = CHECKSUM_UNNECESSARY;*/
 +
 +	/* TODO: set skb->pkt_type to PACKET_BROADCAST, PACKET_MULTICAST,
 +	 * PACKET_OTHERHOST or PACKET_HOST */
 +
 +	priv->stats.rx_packets++;
- 	priv->stats.rx_bytes += skb->len;
++	priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
 +
- 	dev->last_rx = jiffies;
++	soft_iface->last_rx = jiffies;
 +
 +	netif_rx(skb);
 +}
 +
 +#ifdef HAVE_NET_DEVICE_OPS
 +static const struct net_device_ops bat_netdev_ops = {
 +	.ndo_open = interface_open,
 +	.ndo_stop = interface_release,
 +	.ndo_get_stats = interface_stats,
 +	.ndo_set_mac_address = interface_set_mac_addr,
 +	.ndo_change_mtu = interface_change_mtu,
 +	.ndo_start_xmit = interface_tx,
 +	.ndo_validate_addr = eth_validate_addr
 +};
 +#endif
 +
- void interface_setup(struct net_device *dev)
++static void interface_setup(struct net_device *dev)
 +{
 +	struct bat_priv *priv = netdev_priv(dev);
 +	char dev_addr[ETH_ALEN];
 +
 +	ether_setup(dev);
 +
 +#ifdef HAVE_NET_DEVICE_OPS
 +	dev->netdev_ops = &bat_netdev_ops;
 +#else
 +	dev->open = interface_open;
 +	dev->stop = interface_release;
 +	dev->get_stats = interface_stats;
 +	dev->set_mac_address = interface_set_mac_addr;
 +	dev->change_mtu = interface_change_mtu;
 +	dev->hard_start_xmit = interface_tx;
 +#endif
 +	dev->destructor = free_netdev;
 +
- 	dev->mtu = hardif_min_mtu();
++	/**
++	 * can't call min_mtu, because the needed variables
++	 * have not been initialized yet
++	 */
++	dev->mtu = ETH_DATA_LEN;
 +	dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
 +						* skbuff for our header */
 +
 +	/* generate random address */
 +	random_ether_addr(dev_addr);
 +	memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
 +
 +	SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
 +
 +	memset(priv, 0, sizeof(struct bat_priv));
 +}
 +
++struct net_device *softif_create(char *name)
++{
++	struct net_device *soft_iface;
++	struct bat_priv *bat_priv;
++	int ret;
++
++	soft_iface = alloc_netdev(sizeof(struct bat_priv) , name,
++				   interface_setup);
++
++	if (!soft_iface) {
++		pr_err("Unable to allocate the batman interface: %s\n", name);
++		goto out;
++	}
++
++	ret = register_netdev(soft_iface);
++	if (ret < 0) {
++		pr_err("Unable to register the batman interface '%s': %i\n",
++		       name, ret);
++		goto free_soft_iface;
++	}
++
++	bat_priv = netdev_priv(soft_iface);
++
++	atomic_set(&bat_priv->aggregation_enabled, 1);
++	atomic_set(&bat_priv->bonding_enabled, 0);
++	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
++	atomic_set(&bat_priv->orig_interval, 1000);
++	atomic_set(&bat_priv->log_level, 0);
++	atomic_set(&bat_priv->frag_enabled, 1);
++	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
++	atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
++
++	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
++	atomic_set(&bat_priv->bcast_seqno, 1);
++	atomic_set(&bat_priv->hna_local_changed, 0);
++
++	bat_priv->primary_if = NULL;
++	bat_priv->num_ifaces = 0;
++
++	ret = sysfs_add_meshif(soft_iface);
++	if (ret < 0)
++		goto unreg_soft_iface;
++
++	ret = debugfs_add_meshif(soft_iface);
++	if (ret < 0)
++		goto unreg_sysfs;
++
++	ret = mesh_init(soft_iface);
++	if (ret < 0)
++		goto unreg_debugfs;
++
++	return soft_iface;
++
++unreg_debugfs:
++	debugfs_del_meshif(soft_iface);
++unreg_sysfs:
++	sysfs_del_meshif(soft_iface);
++unreg_soft_iface:
++	unregister_netdev(soft_iface);
++	return NULL;
++
++free_soft_iface:
++	free_netdev(soft_iface);
++out:
++	return NULL;
++}
++
++void softif_destroy(struct net_device *soft_iface)
++{
++	debugfs_del_meshif(soft_iface);
++	sysfs_del_meshif(soft_iface);
++	mesh_free(soft_iface);
++	unregister_netdevice(soft_iface);
++}
++
 +/* ethtool */
 +static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +	cmd->supported = 0;
 +	cmd->advertising = 0;
 +	cmd->speed = SPEED_10;
 +	cmd->duplex = DUPLEX_FULL;
 +	cmd->port = PORT_TP;
 +	cmd->phy_address = 0;
 +	cmd->transceiver = XCVR_INTERNAL;
 +	cmd->autoneg = AUTONEG_DISABLE;
 +	cmd->maxtxpkt = 0;
 +	cmd->maxrxpkt = 0;
 +
 +	return 0;
 +}
 +
 +static void bat_get_drvinfo(struct net_device *dev,
 +			    struct ethtool_drvinfo *info)
 +{
 +	strcpy(info->driver, "B.A.T.M.A.N. advanced");
 +	strcpy(info->version, SOURCE_VERSION);
 +	strcpy(info->fw_version, "N/A");
 +	strcpy(info->bus_info, "batman");
 +}
 +
 +static u32 bat_get_msglevel(struct net_device *dev)
 +{
 +	return -EOPNOTSUPP;
 +}
 +
 +static void bat_set_msglevel(struct net_device *dev, u32 value)
 +{
 +}
 +
 +static u32 bat_get_link(struct net_device *dev)
 +{
 +	return 1;
 +}
 +
 +static u32 bat_get_rx_csum(struct net_device *dev)
 +{
 +	return 0;
 +}
 +
 +static int bat_set_rx_csum(struct net_device *dev, u32 data)
 +{
 +	return -EOPNOTSUPP;
 +}
diff --combined drivers/staging/batman-adv/soft-interface.h
index 6364854,843a7ec..843a7ec
--- a/drivers/staging/batman-adv/soft-interface.h
+++ b/drivers/staging/batman-adv/soft-interface.h
@@@ -22,12 -22,11 +22,11 @@@
  #ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
  #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
  
- void set_main_if_addr(uint8_t *addr);
- void interface_setup(struct net_device *dev);
- int interface_tx(struct sk_buff *skb, struct net_device *dev);
- void interface_rx(struct sk_buff *skb, int hdr_size);
- int my_skb_push(struct sk_buff *skb, unsigned int len);
- 
- extern unsigned char main_if_addr[];
+ int my_skb_head_push(struct sk_buff *skb, unsigned int len);
+ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
+ void interface_rx(struct net_device *soft_iface,
+ 		  struct sk_buff *skb, int hdr_size);
+ struct net_device *softif_create(char *name);
+ void softif_destroy(struct net_device *soft_iface);
  
  #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --combined drivers/staging/batman-adv/translation-table.c
index b233377,0000000..12b2325
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/translation-table.c
+++ b/drivers/staging/batman-adv/translation-table.c
@@@ -1,505 -1,0 +1,513 @@@
 +/*
 + * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 + *
 + * Marek Lindner, Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "translation-table.h"
 +#include "soft-interface.h"
 +#include "types.h"
 +#include "hash.h"
 +
- struct hashtable_t *hna_local_hash;
- static struct hashtable_t *hna_global_hash;
- atomic_t hna_local_changed;
- 
- DEFINE_SPINLOCK(hna_local_hash_lock);
- static DEFINE_SPINLOCK(hna_global_hash_lock);
- 
 +static void hna_local_purge(struct work_struct *work);
- static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
- static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
++static void _hna_global_del_orig(struct bat_priv *bat_priv,
++				 struct hna_global_entry *hna_global_entry,
 +				 char *message);
 +
- static void hna_local_start_timer(void)
++static void hna_local_start_timer(struct bat_priv *bat_priv)
 +{
- 	queue_delayed_work(bat_event_workqueue, &hna_local_purge_wq, 10 * HZ);
++	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
++	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
 +}
 +
- int hna_local_init(void)
++int hna_local_init(struct bat_priv *bat_priv)
 +{
- 	if (hna_local_hash)
++	if (bat_priv->hna_local_hash)
 +		return 1;
 +
- 	hna_local_hash = hash_new(128, compare_orig, choose_orig);
++	bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);
 +
- 	if (!hna_local_hash)
++	if (!bat_priv->hna_local_hash)
 +		return 0;
 +
- 	atomic_set(&hna_local_changed, 0);
- 	hna_local_start_timer();
++	atomic_set(&bat_priv->hna_local_changed, 0);
++	hna_local_start_timer(bat_priv);
 +
 +	return 1;
 +}
 +
- void hna_local_add(uint8_t *addr)
++void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
++	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 +	struct hna_local_entry *hna_local_entry;
 +	struct hna_global_entry *hna_global_entry;
 +	struct hashtable_t *swaphash;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +	hna_local_entry =
- 		((struct hna_local_entry *)hash_find(hna_local_hash, addr));
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
++						     addr));
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +
- 	if (hna_local_entry != NULL) {
++	if (hna_local_entry) {
 +		hna_local_entry->last_seen = jiffies;
 +		return;
 +	}
 +
 +	/* only announce as many hosts as possible in the batman-packet and
 +	   space in batman_packet->num_hna That also should give a limit to
 +	   MAC-flooding. */
- 	if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
- 	    (num_hna + 1 > 255)) {
++	if ((bat_priv->num_local_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN)
++								/ ETH_ALEN) ||
++	    (bat_priv->num_local_hna + 1 > 255)) {
 +		bat_dbg(DBG_ROUTES, bat_priv,
 +			"Can't add new local hna entry (%pM): "
 +			"number of local hna entries exceeds packet size\n",
 +			addr);
 +		return;
 +	}
 +
 +	bat_dbg(DBG_ROUTES, bat_priv,
 +		"Creating new local hna entry: %pM\n", addr);
 +
 +	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
 +	if (!hna_local_entry)
 +		return;
 +
 +	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
 +	hna_local_entry->last_seen = jiffies;
 +
 +	/* the batman interface mac address should never be purged */
- 	if (compare_orig(addr, soft_device->dev_addr))
++	if (compare_orig(addr, soft_iface->dev_addr))
 +		hna_local_entry->never_purge = 1;
 +	else
 +		hna_local_entry->never_purge = 0;
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +
- 	hash_add(hna_local_hash, hna_local_entry);
- 	num_hna++;
- 	atomic_set(&hna_local_changed, 1);
++	hash_add(bat_priv->hna_local_hash, hna_local_entry);
++	bat_priv->num_local_hna++;
++	atomic_set(&bat_priv->hna_local_changed, 1);
 +
- 	if (hna_local_hash->elements * 4 > hna_local_hash->size) {
- 		swaphash = hash_resize(hna_local_hash,
- 				       hna_local_hash->size * 2);
++	if (bat_priv->hna_local_hash->elements * 4 >
++					bat_priv->hna_local_hash->size) {
++		swaphash = hash_resize(bat_priv->hna_local_hash,
++				       bat_priv->hna_local_hash->size * 2);
 +
- 		if (swaphash == NULL)
++		if (!swaphash)
 +			pr_err("Couldn't resize local hna hash table\n");
 +		else
- 			hna_local_hash = swaphash;
++			bat_priv->hna_local_hash = swaphash;
 +	}
 +
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +
 +	/* remove address from global hash if present */
- 	spin_lock_irqsave(&hna_global_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
 +
- 	hna_global_entry =
- 		((struct hna_global_entry *)hash_find(hna_global_hash, addr));
++	hna_global_entry = ((struct hna_global_entry *)
++				hash_find(bat_priv->hna_global_hash, addr));
 +
- 	if (hna_global_entry != NULL)
- 		_hna_global_del_orig(hna_global_entry, "local hna received");
++	if (hna_global_entry)
++		_hna_global_del_orig(bat_priv, hna_global_entry,
++				     "local hna received");
 +
- 	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +}
 +
- int hna_local_fill_buffer(unsigned char *buff, int buff_len)
++int hna_local_fill_buffer(struct bat_priv *bat_priv,
++			  unsigned char *buff, int buff_len)
 +{
 +	struct hna_local_entry *hna_local_entry;
 +	HASHIT(hashit);
 +	int i = 0;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +
- 	while (hash_iterate(hna_local_hash, &hashit)) {
++	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
 +
 +		if (buff_len < (i + 1) * ETH_ALEN)
 +			break;
 +
 +		hna_local_entry = hashit.bucket->data;
 +		memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);
 +
 +		i++;
 +	}
 +
 +	/* if we did not get all new local hnas see you next time  ;-) */
- 	if (i == num_hna)
- 		atomic_set(&hna_local_changed, 0);
- 
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++	if (i == bat_priv->num_local_hna)
++		atomic_set(&bat_priv->hna_local_changed, 0);
 +
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +	return i;
 +}
 +
 +int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 +{
 +	struct net_device *net_dev = (struct net_device *)seq->private;
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	struct hna_local_entry *hna_local_entry;
 +	HASHIT(hashit);
 +	HASHIT(hashit_count);
 +	unsigned long flags;
 +	size_t buf_size, pos;
 +	char *buff;
 +
 +	if (!bat_priv->primary_if) {
 +		return seq_printf(seq, "BATMAN mesh %s disabled - "
 +			       "please specify interfaces to enable it\n",
 +			       net_dev->name);
 +	}
 +
 +	seq_printf(seq, "Locally retrieved addresses (from %s) "
 +		   "announced via HNA:\n",
 +		   net_dev->name);
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +
 +	buf_size = 1;
 +	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
- 	while (hash_iterate(hna_local_hash, &hashit_count))
++	while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
 +		buf_size += 21;
 +
 +	buff = kmalloc(buf_size, GFP_ATOMIC);
 +	if (!buff) {
- 		spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +		return -ENOMEM;
 +	}
 +	buff[0] = '\0';
 +	pos = 0;
 +
- 	while (hash_iterate(hna_local_hash, &hashit)) {
++	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
 +		hna_local_entry = hashit.bucket->data;
 +
 +		pos += snprintf(buff + pos, 22, " * %pM\n",
 +				hna_local_entry->addr);
 +	}
 +
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +
 +	seq_printf(seq, "%s", buff);
 +	kfree(buff);
 +	return 0;
 +}
 +
- static void _hna_local_del(void *data)
++static void _hna_local_del(void *data, void *arg)
 +{
++	struct bat_priv *bat_priv = (struct bat_priv *)arg;
++
 +	kfree(data);
- 	num_hna--;
- 	atomic_set(&hna_local_changed, 1);
++	bat_priv->num_local_hna--;
++	atomic_set(&bat_priv->hna_local_changed, 1);
 +}
 +
- static void hna_local_del(struct hna_local_entry *hna_local_entry,
++static void hna_local_del(struct bat_priv *bat_priv,
++			  struct hna_local_entry *hna_local_entry,
 +			  char *message)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
 +		hna_local_entry->addr, message);
 +
- 	hash_remove(hna_local_hash, hna_local_entry->addr);
- 	_hna_local_del(hna_local_entry);
++	hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
++	_hna_local_del(hna_local_entry, bat_priv);
 +}
 +
- void hna_local_remove(uint8_t *addr, char *message)
++void hna_local_remove(struct bat_priv *bat_priv,
++		      uint8_t *addr, char *message)
 +{
 +	struct hna_local_entry *hna_local_entry;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +
 +	hna_local_entry = (struct hna_local_entry *)
- 		hash_find(hna_local_hash, addr);
++		hash_find(bat_priv->hna_local_hash, addr);
 +	if (hna_local_entry)
- 		hna_local_del(hna_local_entry, message);
++		hna_local_del(bat_priv, hna_local_entry, message);
 +
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +}
 +
 +static void hna_local_purge(struct work_struct *work)
 +{
++	struct delayed_work *delayed_work =
++		container_of(work, struct delayed_work, work);
++	struct bat_priv *bat_priv =
++		container_of(delayed_work, struct bat_priv, hna_work);
 +	struct hna_local_entry *hna_local_entry;
 +	HASHIT(hashit);
 +	unsigned long flags;
 +	unsigned long timeout;
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +
- 	while (hash_iterate(hna_local_hash, &hashit)) {
++	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
 +		hna_local_entry = hashit.bucket->data;
 +
 +		timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
++
 +		if ((!hna_local_entry->never_purge) &&
 +		    time_after(jiffies, timeout))
- 			hna_local_del(hna_local_entry, "address timed out");
++			hna_local_del(bat_priv, hna_local_entry,
++				      "address timed out");
 +	}
 +
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
- 	hna_local_start_timer();
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
++	hna_local_start_timer(bat_priv);
 +}
 +
- void hna_local_free(void)
++void hna_local_free(struct bat_priv *bat_priv)
 +{
- 	if (!hna_local_hash)
++	if (!bat_priv->hna_local_hash)
 +		return;
 +
- 	cancel_delayed_work_sync(&hna_local_purge_wq);
- 	hash_delete(hna_local_hash, _hna_local_del);
- 	hna_local_hash = NULL;
++	cancel_delayed_work_sync(&bat_priv->hna_work);
++	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
++	bat_priv->hna_local_hash = NULL;
 +}
 +
- int hna_global_init(void)
++int hna_global_init(struct bat_priv *bat_priv)
 +{
- 	if (hna_global_hash)
++	if (bat_priv->hna_global_hash)
 +		return 1;
 +
- 	hna_global_hash = hash_new(128, compare_orig, choose_orig);
++	bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);
 +
- 	if (!hna_global_hash)
++	if (!bat_priv->hna_global_hash)
 +		return 0;
 +
 +	return 1;
 +}
 +
- void hna_global_add_orig(struct orig_node *orig_node,
++void hna_global_add_orig(struct bat_priv *bat_priv,
++			 struct orig_node *orig_node,
 +			 unsigned char *hna_buff, int hna_buff_len)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	struct hna_global_entry *hna_global_entry;
 +	struct hna_local_entry *hna_local_entry;
 +	struct hashtable_t *swaphash;
 +	int hna_buff_count = 0;
 +	unsigned long flags;
 +	unsigned char *hna_ptr;
 +
 +	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
- 		spin_lock_irqsave(&hna_global_hash_lock, flags);
++		spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
 +
 +		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
 +		hna_global_entry = (struct hna_global_entry *)
- 			hash_find(hna_global_hash, hna_ptr);
++			hash_find(bat_priv->hna_global_hash, hna_ptr);
 +
- 		if (hna_global_entry == NULL) {
- 			spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++		if (!hna_global_entry) {
++			spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
++					       flags);
 +
 +			hna_global_entry =
 +				kmalloc(sizeof(struct hna_global_entry),
 +					GFP_ATOMIC);
 +
 +			if (!hna_global_entry)
 +				break;
 +
 +			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
 +
 +			bat_dbg(DBG_ROUTES, bat_priv,
 +				"Creating new global hna entry: "
 +				"%pM (via %pM)\n",
 +				hna_global_entry->addr, orig_node->orig);
 +
- 			spin_lock_irqsave(&hna_global_hash_lock, flags);
- 			hash_add(hna_global_hash, hna_global_entry);
++			spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
++			hash_add(bat_priv->hna_global_hash, hna_global_entry);
 +
 +		}
 +
 +		hna_global_entry->orig_node = orig_node;
- 		spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +
 +		/* remove address from local hash if present */
- 		spin_lock_irqsave(&hna_local_hash_lock, flags);
++		spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
 +
 +		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
 +		hna_local_entry = (struct hna_local_entry *)
- 			hash_find(hna_local_hash, hna_ptr);
++			hash_find(bat_priv->hna_local_hash, hna_ptr);
 +
- 		if (hna_local_entry != NULL)
- 			hna_local_del(hna_local_entry, "global hna received");
++		if (hna_local_entry)
++			hna_local_del(bat_priv, hna_local_entry,
++				      "global hna received");
 +
- 		spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +
 +		hna_buff_count++;
 +	}
 +
 +	/* initialize, and overwrite if malloc succeeds */
 +	orig_node->hna_buff = NULL;
 +	orig_node->hna_buff_len = 0;
 +
 +	if (hna_buff_len > 0) {
 +		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
 +		if (orig_node->hna_buff) {
 +			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
 +			orig_node->hna_buff_len = hna_buff_len;
 +		}
 +	}
 +
- 	spin_lock_irqsave(&hna_global_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
 +
- 	if (hna_global_hash->elements * 4 > hna_global_hash->size) {
- 		swaphash = hash_resize(hna_global_hash,
- 				       hna_global_hash->size * 2);
++	if (bat_priv->hna_global_hash->elements * 4 >
++					bat_priv->hna_global_hash->size) {
++		swaphash = hash_resize(bat_priv->hna_global_hash,
++				       bat_priv->hna_global_hash->size * 2);
 +
- 		if (swaphash == NULL)
++		if (!swaphash)
 +			pr_err("Couldn't resize global hna hash table\n");
 +		else
- 			hna_global_hash = swaphash;
++			bat_priv->hna_global_hash = swaphash;
 +	}
 +
- 	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +}
 +
 +int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 +{
 +	struct net_device *net_dev = (struct net_device *)seq->private;
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	struct hna_global_entry *hna_global_entry;
 +	HASHIT(hashit);
 +	HASHIT(hashit_count);
 +	unsigned long flags;
 +	size_t buf_size, pos;
 +	char *buff;
 +
 +	if (!bat_priv->primary_if) {
 +		return seq_printf(seq, "BATMAN mesh %s disabled - "
 +				  "please specify interfaces to enable it\n",
 +				  net_dev->name);
 +	}
 +
 +	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
 +		   net_dev->name);
 +
- 	spin_lock_irqsave(&hna_global_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
 +
 +	buf_size = 1;
 +	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
- 	while (hash_iterate(hna_global_hash, &hashit_count))
++	while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
 +		buf_size += 43;
 +
 +	buff = kmalloc(buf_size, GFP_ATOMIC);
 +	if (!buff) {
- 		spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +		return -ENOMEM;
 +	}
 +	buff[0] = '\0';
 +	pos = 0;
 +
- 	while (hash_iterate(hna_global_hash, &hashit)) {
++	while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
 +		hna_global_entry = hashit.bucket->data;
 +
 +		pos += snprintf(buff + pos, 44,
 +				" * %pM via %pM\n", hna_global_entry->addr,
 +				hna_global_entry->orig_node->orig);
 +	}
 +
- 	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +
 +	seq_printf(seq, "%s", buff);
 +	kfree(buff);
 +	return 0;
 +}
 +
- static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
++static void _hna_global_del_orig(struct bat_priv *bat_priv,
++				 struct hna_global_entry *hna_global_entry,
 +				 char *message)
 +{
- 	/* FIXME: each orig_node->batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +	bat_dbg(DBG_ROUTES, bat_priv,
 +		"Deleting global hna entry %pM (via %pM): %s\n",
 +		hna_global_entry->addr, hna_global_entry->orig_node->orig,
 +		message);
 +
- 	hash_remove(hna_global_hash, hna_global_entry->addr);
++	hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
 +	kfree(hna_global_entry);
 +}
 +
- void hna_global_del_orig(struct orig_node *orig_node, char *message)
++void hna_global_del_orig(struct bat_priv *bat_priv,
++			 struct orig_node *orig_node, char *message)
 +{
 +	struct hna_global_entry *hna_global_entry;
 +	int hna_buff_count = 0;
 +	unsigned long flags;
 +	unsigned char *hna_ptr;
 +
 +	if (orig_node->hna_buff_len == 0)
 +		return;
 +
- 	spin_lock_irqsave(&hna_global_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
 +
 +	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
 +		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
 +		hna_global_entry = (struct hna_global_entry *)
- 			hash_find(hna_global_hash, hna_ptr);
++			hash_find(bat_priv->hna_global_hash, hna_ptr);
 +
- 		if ((hna_global_entry != NULL) &&
++		if ((hna_global_entry) &&
 +		    (hna_global_entry->orig_node == orig_node))
- 			_hna_global_del_orig(hna_global_entry, message);
++			_hna_global_del_orig(bat_priv, hna_global_entry,
++					     message);
 +
 +		hna_buff_count++;
 +	}
 +
- 	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +
 +	orig_node->hna_buff_len = 0;
 +	kfree(orig_node->hna_buff);
 +	orig_node->hna_buff = NULL;
 +}
 +
- static void hna_global_del(void *data)
++static void hna_global_del(void *data, void *arg)
 +{
 +	kfree(data);
 +}
 +
- void hna_global_free(void)
++void hna_global_free(struct bat_priv *bat_priv)
 +{
- 	if (!hna_global_hash)
++	if (!bat_priv->hna_global_hash)
 +		return;
 +
- 	hash_delete(hna_global_hash, hna_global_del);
- 	hna_global_hash = NULL;
++	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
++	bat_priv->hna_global_hash = NULL;
 +}
 +
- struct orig_node *transtable_search(uint8_t *addr)
++struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 +{
 +	struct hna_global_entry *hna_global_entry;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&hna_global_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
 +	hna_global_entry = (struct hna_global_entry *)
- 		hash_find(hna_global_hash, addr);
- 	spin_unlock_irqrestore(&hna_global_hash_lock, flags);
++				hash_find(bat_priv->hna_global_hash, addr);
++	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
 +
- 	if (hna_global_entry == NULL)
++	if (!hna_global_entry)
 +		return NULL;
 +
 +	return hna_global_entry->orig_node;
 +}
diff --combined drivers/staging/batman-adv/translation-table.h
index fa93e37,10c4c5c..10c4c5c
--- a/drivers/staging/batman-adv/translation-table.h
+++ b/drivers/staging/batman-adv/translation-table.h
@@@ -24,22 -24,22 +24,22 @@@
  
  #include "types.h"
  
- int hna_local_init(void);
- void hna_local_add(uint8_t *addr);
- void hna_local_remove(uint8_t *addr, char *message);
- int hna_local_fill_buffer(unsigned char *buff, int buff_len);
+ int hna_local_init(struct bat_priv *bat_priv);
+ void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
+ void hna_local_remove(struct bat_priv *bat_priv,
+ 		      uint8_t *addr, char *message);
+ int hna_local_fill_buffer(struct bat_priv *bat_priv,
+ 			  unsigned char *buff, int buff_len);
  int hna_local_seq_print_text(struct seq_file *seq, void *offset);
- void hna_local_free(void);
- int hna_global_init(void);
- void hna_global_add_orig(struct orig_node *orig_node, unsigned char *hna_buff,
- 			 int hna_buff_len);
+ void hna_local_free(struct bat_priv *bat_priv);
+ int hna_global_init(struct bat_priv *bat_priv);
+ void hna_global_add_orig(struct bat_priv *bat_priv,
+ 			 struct orig_node *orig_node,
+ 			 unsigned char *hna_buff, int hna_buff_len);
  int hna_global_seq_print_text(struct seq_file *seq, void *offset);
- void hna_global_del_orig(struct orig_node *orig_node, char *message);
- void hna_global_free(void);
- struct orig_node *transtable_search(uint8_t *addr);
- 
- extern spinlock_t hna_local_hash_lock;
- extern struct hashtable_t *hna_local_hash;
- extern atomic_t hna_local_changed;
+ void hna_global_del_orig(struct bat_priv *bat_priv,
+ 			 struct orig_node *orig_node, char *message);
+ void hna_global_free(struct bat_priv *bat_priv);
+ struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
  
  #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --combined drivers/staging/batman-adv/types.h
index 9aa9d36,e779c4a..e779c4a
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@@ -36,31 -36,32 +36,32 @@@
  struct batman_if {
  	struct list_head list;
  	int16_t if_num;
- 	char *dev;
  	char if_status;
  	char addr_str[ETH_STR_LEN];
  	struct net_device *net_dev;
  	atomic_t seqno;
+ 	atomic_t frag_seqno;
  	unsigned char *packet_buff;
  	int packet_len;
  	struct kobject *hardif_obj;
  	struct rcu_head rcu;
- 
+ 	struct packet_type batman_adv_ptype;
+ 	struct net_device *soft_iface;
  };
  
  /**
-   *	orig_node - structure for orig_list maintaining nodes of mesh
-   *	@primary_addr: hosts primary interface address
-   *	@last_valid: when last packet from this node was received
-   *	@bcast_seqno_reset: time when the broadcast seqno window was reset
-   *	@batman_seqno_reset: time when the batman seqno window was reset
-   *	@flags: for now only VIS_SERVER flag
-   *	@last_real_seqno: last and best known squence number
-   *	@last_ttl: ttl of last received packet
-   *	@last_bcast_seqno: last broadcast sequence number received by this host
-   *
-   *	@candidates: how many candidates are available
-   *	@selected: next bonding candidate
+  *	orig_node - structure for orig_list maintaining nodes of mesh
+  *	@primary_addr: hosts primary interface address
+  *	@last_valid: when last packet from this node was received
+  *	@bcast_seqno_reset: time when the broadcast seqno window was reset
+  *	@batman_seqno_reset: time when the batman seqno window was reset
+  *	@flags: for now only VIS_SERVER flag
+  *	@last_real_seqno: last and best known squence number
+  *	@last_ttl: ttl of last received packet
+  *	@last_bcast_seqno: last broadcast sequence number received by this host
+  *
+  *	@candidates: how many candidates are available
+  *	@selected: next bonding candidate
   */
  struct orig_node {
  	uint8_t orig[ETH_ALEN];
@@@ -81,6 -82,8 +82,8 @@@
  	TYPE_OF_WORD bcast_bits[NUM_WORDS];
  	uint32_t last_bcast_seqno;
  	struct list_head neigh_list;
+ 	struct list_head frag_list;
+ 	unsigned long last_frag_packet;
  	struct {
  		uint8_t candidates;
  		struct neigh_node *selected;
@@@ -88,8 -91,8 +91,8 @@@
  };
  
  /**
-   *	neigh_node
-   *	@last_valid: when last packet via this neighbor was received
+  *	neigh_node
+  *	@last_valid: when last packet via this neighbor was received
   */
  struct neigh_node {
  	struct list_head list;
@@@ -107,17 -110,45 +110,45 @@@
  };
  
  struct bat_priv {
+ 	atomic_t mesh_state;
  	struct net_device_stats stats;
  	atomic_t aggregation_enabled;
  	atomic_t bonding_enabled;
+ 	atomic_t frag_enabled;
  	atomic_t vis_mode;
  	atomic_t orig_interval;
  	atomic_t log_level;
+ 	atomic_t bcast_seqno;
+ 	atomic_t bcast_queue_left;
+ 	atomic_t batman_queue_left;
  	char num_ifaces;
  	struct debug_log *debug_log;
  	struct batman_if *primary_if;
  	struct kobject *mesh_obj;
  	struct dentry *debug_dir;
+ 	struct hlist_head forw_bat_list;
+ 	struct hlist_head forw_bcast_list;
+ 	struct hlist_head gw_list;
+ 	struct list_head vis_send_list;
+ 	struct hashtable_t *orig_hash;
+ 	struct hashtable_t *hna_local_hash;
+ 	struct hashtable_t *hna_global_hash;
+ 	struct hashtable_t *vis_hash;
+ 	spinlock_t orig_hash_lock;
+ 	spinlock_t forw_bat_list_lock;
+ 	spinlock_t forw_bcast_list_lock;
+ 	spinlock_t hna_lhash_lock;
+ 	spinlock_t hna_ghash_lock;
+ 	spinlock_t gw_list_lock;
+ 	spinlock_t vis_hash_lock;
+ 	spinlock_t vis_list_lock;
+ 	int16_t num_local_hna;
+ 	atomic_t hna_local_changed;
+ 	struct delayed_work hna_work;
+ 	struct delayed_work orig_work;
+ 	struct delayed_work vis_work;
+ 	struct gw_node *curr_gw;
+ 	struct vis_info *my_vis_info;
  };
  
  struct socket_client {
@@@ -147,15 -178,14 +178,14 @@@ struct hna_global_entry 
  };
  
  /**
-   *	forw_packet - structure for forw_list maintaining packets to be
-   *	              send/forwarded
+  *	forw_packet - structure for forw_list maintaining packets to be
+  *	              send/forwarded
   */
  struct forw_packet {
  	struct hlist_node list;
  	unsigned long send_time;
  	uint8_t own;
  	struct sk_buff *skb;
- 	unsigned char *packet_buff;
  	uint16_t packet_len;
  	uint32_t direct_link_flags;
  	uint8_t num_packets;
@@@ -181,4 -211,34 +211,34 @@@ struct debug_log 
  	wait_queue_head_t queue_wait;
  };
  
+ struct frag_packet_list_entry {
+ 	struct list_head list;
+ 	uint16_t seqno;
+ 	struct sk_buff *skb;
+ };
+ 
+ struct vis_info {
+ 	unsigned long       first_seen;
+ 	struct list_head    recv_list;
+ 			    /* list of server-neighbors we received a vis-packet
+ 			     * from.  we should not reply to them. */
+ 	struct list_head send_list;
+ 	struct kref refcount;
+ 	struct bat_priv *bat_priv;
+ 	/* this packet might be part of the vis send queue. */
+ 	struct sk_buff *skb_packet;
+ 	/* vis_info may follow here*/
+ } __attribute__((packed));
+ 
+ struct vis_info_entry {
+ 	uint8_t  src[ETH_ALEN];
+ 	uint8_t  dest[ETH_ALEN];
+ 	uint8_t  quality;	/* quality = 0 means HNA */
+ } __attribute__((packed));
+ 
+ struct recvlist_node {
+ 	struct list_head list;
+ 	uint8_t mac[ETH_ALEN];
+ };
+ 
  #endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --combined drivers/staging/batman-adv/unicast.c
index 0000000,f951abc..f951abc
mode 000000,100644..100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@@ -1,0 -1,265 +1,265 @@@
+ /*
+  * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+  *
+  * Andreas Langer
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of version 2 of the GNU General Public
+  * License as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but
+  * WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  * General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+  * 02110-1301, USA
+  *
+  */
+ 
+ #include "main.h"
+ #include "unicast.h"
+ #include "send.h"
+ #include "soft-interface.h"
+ #include "hash.h"
+ #include "translation-table.h"
+ #include "routing.h"
+ #include "hard-interface.h"
+ 
+ 
+ struct sk_buff *merge_frag_packet(struct list_head *head,
+ 				  struct frag_packet_list_entry *tfp,
+ 				  struct sk_buff *skb)
+ {
+ 	struct unicast_frag_packet *up =
+ 		(struct unicast_frag_packet *)skb->data;
+ 	struct sk_buff *tmp_skb;
+ 
+ 	/* set skb to the first part and tmp_skb to the second part */
+ 	if (up->flags & UNI_FRAG_HEAD) {
+ 		tmp_skb = tfp->skb;
+ 	} else {
+ 		tmp_skb = skb;
+ 		skb = tfp->skb;
+ 	}
+ 
+ 	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
+ 	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
+ 		/* free buffered skb, skb will be freed later */
+ 		kfree_skb(tfp->skb);
+ 		return NULL;
+ 	}
+ 
+ 	/* move free entry to end */
+ 	tfp->skb = NULL;
+ 	tfp->seqno = 0;
+ 	list_move_tail(&tfp->list, head);
+ 
+ 	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
+ 	kfree_skb(tmp_skb);
+ 	return skb;
+ }
+ 
+ void create_frag_entry(struct list_head *head, struct sk_buff *skb)
+ {
+ 	struct frag_packet_list_entry *tfp;
+ 	struct unicast_frag_packet *up =
+ 		(struct unicast_frag_packet *)skb->data;
+ 
+ 	/* free and oldest packets stand at the end */
+ 	tfp = list_entry((head)->prev, typeof(*tfp), list);
+ 	kfree_skb(tfp->skb);
+ 
+ 	tfp->seqno = ntohs(up->seqno);
+ 	tfp->skb = skb;
+ 	list_move(&tfp->list, head);
+ 	return;
+ }
+ 
+ void create_frag_buffer(struct list_head *head)
+ {
+ 	int i;
+ 	struct frag_packet_list_entry *tfp;
+ 
+ 	for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
+ 		tfp = kmalloc(sizeof(struct frag_packet_list_entry),
+ 			GFP_ATOMIC);
+ 		tfp->skb = NULL;
+ 		tfp->seqno = 0;
+ 		INIT_LIST_HEAD(&tfp->list);
+ 		list_add(&tfp->list, head);
+ 	}
+ 
+ 	return;
+ }
+ 
+ struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
+ 						 struct unicast_frag_packet *up)
+ {
+ 	struct frag_packet_list_entry *tfp;
+ 	struct unicast_frag_packet *tmp_up = NULL;
+ 	uint16_t search_seqno;
+ 
+ 	if (up->flags & UNI_FRAG_HEAD)
+ 		search_seqno = ntohs(up->seqno)+1;
+ 	else
+ 		search_seqno = ntohs(up->seqno)-1;
+ 
+ 	list_for_each_entry(tfp, head, list) {
+ 
+ 		if (!tfp->skb)
+ 			continue;
+ 
+ 		if (tfp->seqno == ntohs(up->seqno))
+ 			goto mov_tail;
+ 
+ 		tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
+ 
+ 		if (tfp->seqno == search_seqno) {
+ 
+ 			if ((tmp_up->flags & UNI_FRAG_HEAD) !=
+ 			    (up->flags & UNI_FRAG_HEAD))
+ 				return tfp;
+ 			else
+ 				goto mov_tail;
+ 		}
+ 	}
+ 	return NULL;
+ 
+ mov_tail:
+ 	list_move_tail(&tfp->list, head);
+ 	return NULL;
+ }
+ 
+ void frag_list_free(struct list_head *head)
+ {
+ 	struct frag_packet_list_entry *pf, *tmp_pf;
+ 
+ 	if (!list_empty(head)) {
+ 
+ 		list_for_each_entry_safe(pf, tmp_pf, head, list) {
+ 			kfree_skb(pf->skb);
+ 			list_del(&pf->list);
+ 			kfree(pf);
+ 		}
+ 	}
+ 	return;
+ }
+ 
+ static int unicast_send_frag_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ 			  struct batman_if *batman_if, uint8_t dstaddr[],
+ 			  struct orig_node *orig_node)
+ {
+ 	struct unicast_frag_packet *ucast_frag1, *ucast_frag2;
+ 	int hdr_len = sizeof(struct unicast_frag_packet);
+ 	struct sk_buff *frag_skb;
+ 	int data_len = skb->len;
+ 
+ 	if (!bat_priv->primary_if)
+ 		goto dropped;
+ 
+ 	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + hdr_len);
+ 	skb_split(skb, frag_skb, data_len / 2);
+ 
+ 	if (my_skb_head_push(frag_skb, hdr_len) < 0 ||
+ 	    my_skb_head_push(skb, hdr_len) < 0)
+ 		goto drop_frag;
+ 
+ 	ucast_frag1 = (struct unicast_frag_packet *)skb->data;
+ 	ucast_frag2 = (struct unicast_frag_packet *)frag_skb->data;
+ 
+ 	ucast_frag1->version = COMPAT_VERSION;
+ 	ucast_frag1->packet_type = BAT_UNICAST_FRAG;
+ 	ucast_frag1->ttl = TTL;
+ 	memcpy(ucast_frag1->orig,
+ 	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+ 	memcpy(ucast_frag1->dest, orig_node->orig, ETH_ALEN);
+ 
+ 	memcpy(ucast_frag2, ucast_frag1, sizeof(struct unicast_frag_packet));
+ 
+ 	ucast_frag1->flags |= UNI_FRAG_HEAD;
+ 	ucast_frag2->flags &= ~UNI_FRAG_HEAD;
+ 
+ 	ucast_frag1->seqno = htons((uint16_t)atomic_inc_return(
+ 						&batman_if->frag_seqno));
+ 
+ 	ucast_frag2->seqno = htons((uint16_t)atomic_inc_return(
+ 						&batman_if->frag_seqno));
+ 
+ 	send_skb_packet(skb, batman_if, dstaddr);
+ 	send_skb_packet(frag_skb, batman_if, dstaddr);
+ 	return 0;
+ 
+ drop_frag:
+ 	kfree_skb(frag_skb);
+ dropped:
+ 	kfree_skb(skb);
+ 	return 1;
+ }
+ 
+ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
+ {
+ 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ 	struct unicast_packet *unicast_packet;
+ 	struct orig_node *orig_node;
+ 	struct batman_if *batman_if;
+ 	struct neigh_node *router;
+ 	int data_len = skb->len;
+ 	uint8_t dstaddr[6];
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ 
+ 	/* get routing information */
+ 	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
+ 						   ethhdr->h_dest));
+ 
+ 	/* check for hna host */
+ 	if (!orig_node)
+ 		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ 
+ 	router = find_router(orig_node, NULL);
+ 
+ 	if (!router)
+ 		goto unlock;
+ 
+ 	/* don't lock while sending the packets ... we therefore
+ 		* copy the required data before sending */
+ 
+ 	batman_if = router->if_incoming;
+ 	memcpy(dstaddr, router->addr, ETH_ALEN);
+ 
+ 	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ 
+ 	if (batman_if->if_status != IF_ACTIVE)
+ 		goto dropped;
+ 
+ 	if (atomic_read(&bat_priv->frag_enabled) &&
+ 	    data_len + sizeof(struct unicast_packet) > batman_if->net_dev->mtu)
+ 		return unicast_send_frag_skb(skb, bat_priv, batman_if,
+ 					     dstaddr, orig_node);
+ 
+ 	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
+ 		goto dropped;
+ 
+ 	unicast_packet = (struct unicast_packet *)skb->data;
+ 
+ 	unicast_packet->version = COMPAT_VERSION;
+ 	/* batman packet type: unicast */
+ 	unicast_packet->packet_type = BAT_UNICAST;
+ 	/* set unicast ttl */
+ 	unicast_packet->ttl = TTL;
+ 	/* copy the destination for faster routing */
+ 	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+ 
+ 	send_skb_packet(skb, batman_if, dstaddr);
+ 	return 0;
+ 
+ unlock:
+ 	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ dropped:
+ 	kfree_skb(skb);
+ 	return 1;
+ }
diff --combined drivers/staging/batman-adv/unicast.h
index 0000000,1d5cbeb..1d5cbeb
mode 000000,100644..100644
--- a/drivers/staging/batman-adv/unicast.h
+++ b/drivers/staging/batman-adv/unicast.h
@@@ -1,0 -1,39 +1,39 @@@
+ /*
+  * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+  *
+  * Andreas Langer
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of version 2 of the GNU General Public
+  * License as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful, but
+  * WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  * General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+  * 02110-1301, USA
+  *
+  */
+ 
+ #ifndef _NET_BATMAN_ADV_UNICAST_H_
+ #define _NET_BATMAN_ADV_UNICAST_H_
+ 
+ #define FRAG_TIMEOUT 10000	/* purge frag list entries after time in ms */
+ #define FRAG_BUFFER_SIZE 6	/* number of list elements in buffer */
+ 
+ struct sk_buff *merge_frag_packet(struct list_head *head,
+ 	struct frag_packet_list_entry *tfp,
+ 	struct sk_buff *skb);
+ 
+ void create_frag_entry(struct list_head *head, struct sk_buff *skb);
+ void create_frag_buffer(struct list_head *head);
+ struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
+ 	struct unicast_frag_packet *up);
+ void frag_list_free(struct list_head *head);
+ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
+ 
+ #endif /* _NET_BATMAN_ADV_UNICAST_H_ */
diff --combined drivers/staging/batman-adv/vis.c
index 4b6a504,0000000..b2cec8e
mode 100644,000000..100644
--- a/drivers/staging/batman-adv/vis.c
+++ b/drivers/staging/batman-adv/vis.c
@@@ -1,817 -1,0 +1,901 @@@
 +/*
 + * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
 + *
 + * Simon Wunderlich
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of version 2 of the GNU General Public
 + * License as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 + * 02110-1301, USA
 + *
 + */
 +
 +#include "main.h"
 +#include "send.h"
 +#include "translation-table.h"
 +#include "vis.h"
 +#include "soft-interface.h"
 +#include "hard-interface.h"
 +#include "hash.h"
 +
++#define MAX_VIS_PACKET_SIZE 1000
++
 +/* Returns the smallest signed integer in two's complement with the sizeof x */
 +#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
 +
 +/* Checks if a sequence number x is a predecessor/successor of y.
 +   they handle overflows/underflows and can correctly check for a
 +   predecessor/successor unless the variable sequence number has grown by
 +   more than 2**(bitwidth(x)-1)-1.
 +   This means that for a uint8_t with the maximum value 255, it would think:
 +    * when adding nothing - it is neither a predecessor nor a successor
 +    * before adding more than 127 to the starting value - it is a predecessor,
 +    * when adding 128 - it is neither a predecessor nor a successor,
 +    * after adding more than 127 to the starting value - it is a successor */
 +#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
 +			_dummy > smallest_signed_int(_dummy); })
 +#define seq_after(x, y) seq_before(y, x)
 +
- static struct hashtable_t *vis_hash;
- static DEFINE_SPINLOCK(vis_hash_lock);
- static DEFINE_SPINLOCK(recv_list_lock);
- static struct vis_info *my_vis_info;
- static struct list_head send_list;	/* always locked with vis_hash_lock */
- 
- static void start_vis_timer(void);
++static void start_vis_timer(struct bat_priv *bat_priv);
 +
 +/* free the info */
 +static void free_info(struct kref *ref)
 +{
 +	struct vis_info *info = container_of(ref, struct vis_info, refcount);
++	struct bat_priv *bat_priv = info->bat_priv;
 +	struct recvlist_node *entry, *tmp;
 +	unsigned long flags;
 +
 +	list_del_init(&info->send_list);
- 	spin_lock_irqsave(&recv_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
 +	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
 +		list_del(&entry->list);
 +		kfree(entry);
 +	}
- 	spin_unlock_irqrestore(&recv_list_lock, flags);
- 	kfree(info);
++
++	spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
++	kfree_skb(info->skb_packet);
 +}
 +
 +/* Compare two vis packets, used by the hashing algorithm */
 +static int vis_info_cmp(void *data1, void *data2)
 +{
 +	struct vis_info *d1, *d2;
++	struct vis_packet *p1, *p2;
 +	d1 = data1;
 +	d2 = data2;
- 	return compare_orig(d1->packet.vis_orig, d2->packet.vis_orig);
++	p1 = (struct vis_packet *)d1->skb_packet->data;
++	p2 = (struct vis_packet *)d2->skb_packet->data;
++	return compare_orig(p1->vis_orig, p2->vis_orig);
 +}
 +
 +/* hash function to choose an entry in a hash table of given size */
 +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
 +static int vis_info_choose(void *data, int size)
 +{
 +	struct vis_info *vis_info = data;
++	struct vis_packet *packet;
 +	unsigned char *key;
 +	uint32_t hash = 0;
 +	size_t i;
 +
- 	key = vis_info->packet.vis_orig;
++	packet = (struct vis_packet *)vis_info->skb_packet->data;
++	key = packet->vis_orig;
 +	for (i = 0; i < ETH_ALEN; i++) {
 +		hash += key[i];
 +		hash += (hash << 10);
 +		hash ^= (hash >> 6);
 +	}
 +
 +	hash += (hash << 3);
 +	hash ^= (hash >> 11);
 +	hash += (hash << 15);
 +
 +	return hash % size;
 +}
 +
 +/* insert interface to the list of interfaces of one originator, if it
 + * does not already exist in the list */
 +static void vis_data_insert_interface(const uint8_t *interface,
 +				      struct hlist_head *if_list,
 +				      bool primary)
 +{
 +	struct if_list_entry *entry;
 +	struct hlist_node *pos;
 +
 +	hlist_for_each_entry(entry, pos, if_list, list) {
 +		if (compare_orig(entry->addr, (void *)interface))
 +			return;
 +	}
 +
 +	/* it's a new address, add it to the list */
 +	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 +	if (!entry)
 +		return;
 +	memcpy(entry->addr, interface, ETH_ALEN);
 +	entry->primary = primary;
 +	hlist_add_head(&entry->list, if_list);
 +}
 +
 +static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
 +{
 +	struct if_list_entry *entry;
 +	struct hlist_node *pos;
 +	char tmp_addr_str[ETH_STR_LEN];
 +	size_t len = 0;
 +
 +	hlist_for_each_entry(entry, pos, if_list, list) {
 +		if (entry->primary)
 +			len += sprintf(buff + len, "PRIMARY, ");
 +		else {
 +			addr_to_string(tmp_addr_str, entry->addr);
 +			len += sprintf(buff + len,  "SEC %s, ", tmp_addr_str);
 +		}
 +	}
 +
 +	return len;
 +}
 +
 +static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
 +{
 +	struct if_list_entry *entry;
 +	struct hlist_node *pos;
 +	size_t count = 0;
 +
 +	hlist_for_each_entry(entry, pos, if_list, list) {
 +		if (entry->primary)
 +			count += 9;
 +		else
 +			count += 23;
 +	}
 +
 +	return count;
 +}
 +
 +/* read an entry  */
 +static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
 +				   uint8_t *src, bool primary)
 +{
 +	char to[18];
 +
 +	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
 +	addr_to_string(to, entry->dest);
 +	if (primary && entry->quality == 0)
 +		return sprintf(buff, "HNA %s, ", to);
 +	else if (compare_orig(entry->src, src))
 +		return sprintf(buff, "TQ %s %d, ", to, entry->quality);
 +
 +	return 0;
 +}
 +
 +int vis_seq_print_text(struct seq_file *seq, void *offset)
 +{
 +	HASHIT(hashit);
 +	HASHIT(hashit_count);
 +	struct vis_info *info;
++	struct vis_packet *packet;
 +	struct vis_info_entry *entries;
 +	struct net_device *net_dev = (struct net_device *)seq->private;
 +	struct bat_priv *bat_priv = netdev_priv(net_dev);
 +	HLIST_HEAD(vis_if_list);
 +	struct if_list_entry *entry;
 +	struct hlist_node *pos, *n;
 +	int i;
 +	char tmp_addr_str[ETH_STR_LEN];
 +	unsigned long flags;
 +	int vis_server = atomic_read(&bat_priv->vis_mode);
 +	size_t buff_pos, buf_size;
 +	char *buff;
 +
 +	if ((!bat_priv->primary_if) ||
 +	    (vis_server == VIS_TYPE_CLIENT_UPDATE))
 +		return 0;
 +
 +	buf_size = 1;
 +	/* Estimate length */
- 	spin_lock_irqsave(&vis_hash_lock, flags);
- 	while (hash_iterate(vis_hash, &hashit_count)) {
++	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
++	while (hash_iterate(bat_priv->vis_hash, &hashit_count)) {
 +		info = hashit_count.bucket->data;
++		packet = (struct vis_packet *)info->skb_packet->data;
 +		entries = (struct vis_info_entry *)
- 			((char *)info + sizeof(struct vis_info));
++			  ((char *)packet + sizeof(struct vis_packet));
 +
- 		for (i = 0; i < info->packet.entries; i++) {
++		for (i = 0; i < packet->entries; i++) {
 +			if (entries[i].quality == 0)
 +				continue;
 +			vis_data_insert_interface(entries[i].src, &vis_if_list,
- 				compare_orig(entries[i].src,
- 						info->packet.vis_orig));
++				compare_orig(entries[i].src, packet->vis_orig));
 +		}
 +
 +		hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- 			buf_size += 18 + 26 * info->packet.entries;
++			buf_size += 18 + 26 * packet->entries;
 +
 +			/* add primary/secondary records */
- 			if (compare_orig(entry->addr, info->packet.vis_orig))
++			if (compare_orig(entry->addr, packet->vis_orig))
 +				buf_size +=
 +					vis_data_count_prim_sec(&vis_if_list);
 +
 +			buf_size += 1;
 +		}
 +
 +		hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
 +			hlist_del(&entry->list);
 +			kfree(entry);
 +		}
 +	}
 +
 +	buff = kmalloc(buf_size, GFP_ATOMIC);
 +	if (!buff) {
- 		spin_unlock_irqrestore(&vis_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
 +		return -ENOMEM;
 +	}
 +	buff[0] = '\0';
 +	buff_pos = 0;
 +
- 	while (hash_iterate(vis_hash, &hashit)) {
++	while (hash_iterate(bat_priv->vis_hash, &hashit)) {
 +		info = hashit.bucket->data;
++		packet = (struct vis_packet *)info->skb_packet->data;
 +		entries = (struct vis_info_entry *)
- 			((char *)info + sizeof(struct vis_info));
++			  ((char *)packet + sizeof(struct vis_packet));
 +
- 		for (i = 0; i < info->packet.entries; i++) {
++		for (i = 0; i < packet->entries; i++) {
 +			if (entries[i].quality == 0)
 +				continue;
 +			vis_data_insert_interface(entries[i].src, &vis_if_list,
- 				compare_orig(entries[i].src,
- 						info->packet.vis_orig));
++				compare_orig(entries[i].src, packet->vis_orig));
 +		}
 +
 +		hlist_for_each_entry(entry, pos, &vis_if_list, list) {
 +			addr_to_string(tmp_addr_str, entry->addr);
 +			buff_pos += sprintf(buff + buff_pos, "%s,",
 +					    tmp_addr_str);
 +
- 			for (i = 0; i < info->packet.entries; i++)
++			for (i = 0; i < packet->entries; i++)
 +				buff_pos += vis_data_read_entry(buff + buff_pos,
 +								&entries[i],
 +								entry->addr,
 +								entry->primary);
 +
 +			/* add primary/secondary records */
- 			if (compare_orig(entry->addr, info->packet.vis_orig))
++			if (compare_orig(entry->addr, packet->vis_orig))
 +				buff_pos +=
 +					vis_data_read_prim_sec(buff + buff_pos,
 +							       &vis_if_list);
 +
 +			buff_pos += sprintf(buff + buff_pos, "\n");
 +		}
 +
 +		hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
 +			hlist_del(&entry->list);
 +			kfree(entry);
 +		}
 +	}
 +
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
 +
 +	seq_printf(seq, "%s", buff);
 +	kfree(buff);
 +
 +	return 0;
 +}
 +
 +/* add the info packet to the send list, if it was not
 + * already linked in. */
- static void send_list_add(struct vis_info *info)
++static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
 +{
 +	if (list_empty(&info->send_list)) {
 +		kref_get(&info->refcount);
- 		list_add_tail(&info->send_list, &send_list);
++		list_add_tail(&info->send_list, &bat_priv->vis_send_list);
 +	}
 +}
 +
 +/* delete the info packet from the send list, if it was
 + * linked in. */
 +static void send_list_del(struct vis_info *info)
 +{
 +	if (!list_empty(&info->send_list)) {
 +		list_del_init(&info->send_list);
 +		kref_put(&info->refcount, free_info);
 +	}
 +}
 +
 +/* tries to add one entry to the receive list. */
- static void recv_list_add(struct list_head *recv_list, char *mac)
++static void recv_list_add(struct bat_priv *bat_priv,
++			  struct list_head *recv_list, char *mac)
 +{
 +	struct recvlist_node *entry;
 +	unsigned long flags;
 +
 +	entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
 +	if (!entry)
 +		return;
 +
 +	memcpy(entry->mac, mac, ETH_ALEN);
- 	spin_lock_irqsave(&recv_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
 +	list_add_tail(&entry->list, recv_list);
- 	spin_unlock_irqrestore(&recv_list_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
 +}
 +
 +/* returns 1 if this mac is in the recv_list */
- static int recv_list_is_in(struct list_head *recv_list, char *mac)
++static int recv_list_is_in(struct bat_priv *bat_priv,
++			   struct list_head *recv_list, char *mac)
 +{
 +	struct recvlist_node *entry;
 +	unsigned long flags;
 +
- 	spin_lock_irqsave(&recv_list_lock, flags);
++	spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
 +	list_for_each_entry(entry, recv_list, list) {
 +		if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
- 			spin_unlock_irqrestore(&recv_list_lock, flags);
++			spin_unlock_irqrestore(&bat_priv->vis_list_lock,
++					       flags);
 +			return 1;
 +		}
 +	}
- 	spin_unlock_irqrestore(&recv_list_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
 +	return 0;
 +}
 +
 +/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 + * broken.. ).	vis hash must be locked outside.  is_new is set when the packet
 + * is newer than old entries in the hash. */
- static struct vis_info *add_packet(struct vis_packet *vis_packet,
++static struct vis_info *add_packet(struct bat_priv *bat_priv,
++				   struct vis_packet *vis_packet,
 +				   int vis_info_len, int *is_new,
 +				   int make_broadcast)
 +{
 +	struct vis_info *info, *old_info;
++	struct vis_packet *search_packet, *old_packet;
 +	struct vis_info search_elem;
++	struct vis_packet *packet;
 +
 +	*is_new = 0;
 +	/* sanity check */
- 	if (vis_hash == NULL)
++	if (!bat_priv->vis_hash)
 +		return NULL;
 +
 +	/* see if the packet is already in vis_hash */
- 	memcpy(search_elem.packet.vis_orig, vis_packet->vis_orig, ETH_ALEN);
- 	old_info = hash_find(vis_hash, &search_elem);
++	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
++	if (!search_elem.skb_packet)
++		return NULL;
++	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
++						     sizeof(struct vis_packet));
++
++	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
++	old_info = hash_find(bat_priv->vis_hash, &search_elem);
++	kfree_skb(search_elem.skb_packet);
 +
 +	if (old_info != NULL) {
++		old_packet = (struct vis_packet *)old_info->skb_packet->data;
 +		if (!seq_after(ntohl(vis_packet->seqno),
- 				ntohl(old_info->packet.seqno))) {
- 			if (old_info->packet.seqno == vis_packet->seqno) {
- 				recv_list_add(&old_info->recv_list,
++			       ntohl(old_packet->seqno))) {
++			if (old_packet->seqno == vis_packet->seqno) {
++				recv_list_add(bat_priv, &old_info->recv_list,
 +					      vis_packet->sender_orig);
 +				return old_info;
 +			} else {
 +				/* newer packet is already in hash. */
 +				return NULL;
 +			}
 +		}
 +		/* remove old entry */
- 		hash_remove(vis_hash, old_info);
++		hash_remove(bat_priv->vis_hash, old_info);
 +		send_list_del(old_info);
 +		kref_put(&old_info->refcount, free_info);
 +	}
 +
- 	info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC);
- 	if (info == NULL)
++	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
++	if (!info)
 +		return NULL;
 +
++	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
++					 vis_info_len + sizeof(struct ethhdr));
++	if (!info->skb_packet) {
++		kfree(info);
++		return NULL;
++	}
++	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
++	packet = (struct vis_packet *)skb_put(info->skb_packet,
++					      sizeof(struct vis_packet) +
++					      vis_info_len);
++
 +	kref_init(&info->refcount);
 +	INIT_LIST_HEAD(&info->send_list);
 +	INIT_LIST_HEAD(&info->recv_list);
 +	info->first_seen = jiffies;
- 	memcpy(&info->packet, vis_packet,
- 	       sizeof(struct vis_packet) + vis_info_len);
++	info->bat_priv = bat_priv;
++	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
 +
 +	/* initialize and add new packet. */
 +	*is_new = 1;
 +
 +	/* Make it a broadcast packet, if required */
 +	if (make_broadcast)
- 		memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
++		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
 +
 +	/* repair if entries is longer than packet. */
- 	if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
- 		info->packet.entries = vis_info_len /
- 			sizeof(struct vis_info_entry);
++	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
++		packet->entries = vis_info_len / sizeof(struct vis_info_entry);
 +
- 	recv_list_add(&info->recv_list, info->packet.sender_orig);
++	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
 +
 +	/* try to add it */
- 	if (hash_add(vis_hash, info) < 0) {
++	if (hash_add(bat_priv->vis_hash, info) < 0) {
 +		/* did not work (for some reason) */
 +		kref_put(&old_info->refcount, free_info);
 +		info = NULL;
 +	}
 +
 +	return info;
 +}
 +
 +/* handle the server sync packet, forward if needed. */
 +void receive_server_sync_packet(struct bat_priv *bat_priv,
 +				struct vis_packet *vis_packet,
 +				int vis_info_len)
 +{
 +	struct vis_info *info;
 +	int is_new, make_broadcast;
 +	unsigned long flags;
 +	int vis_server = atomic_read(&bat_priv->vis_mode);
 +
 +	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
 +
- 	spin_lock_irqsave(&vis_hash_lock, flags);
- 	info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
- 	if (info == NULL)
++	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
++	info = add_packet(bat_priv, vis_packet, vis_info_len,
++			  &is_new, make_broadcast);
++	if (!info)
 +		goto end;
 +
 +	/* only if we are server ourselves and packet is newer than the one in
 +	 * hash.*/
 +	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
- 		send_list_add(info);
++		send_list_add(bat_priv, info);
 +end:
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
 +}
 +
 +/* handle an incoming client update packet and schedule forward if needed. */
 +void receive_client_update_packet(struct bat_priv *bat_priv,
 +				  struct vis_packet *vis_packet,
 +				  int vis_info_len)
 +{
 +	struct vis_info *info;
++	struct vis_packet *packet;
 +	int is_new;
 +	unsigned long flags;
 +	int vis_server = atomic_read(&bat_priv->vis_mode);
 +	int are_target = 0;
 +
 +	/* clients shall not broadcast. */
 +	if (is_bcast(vis_packet->target_orig))
 +		return;
 +
 +	/* Are we the target for this VIS packet? */
 +	if (vis_server == VIS_TYPE_SERVER_SYNC	&&
 +	    is_my_mac(vis_packet->target_orig))
 +		are_target = 1;
 +
- 	spin_lock_irqsave(&vis_hash_lock, flags);
- 	info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
- 	if (info == NULL)
++	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
++	info = add_packet(bat_priv, vis_packet, vis_info_len,
++			  &is_new, are_target);
++
++	if (!info)
 +		goto end;
 +	/* note that outdated packets will be dropped at this point. */
 +
++	packet = (struct vis_packet *)info->skb_packet->data;
 +
 +	/* send only if we're the target server or ... */
 +	if (are_target && is_new) {
- 		info->packet.vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
- 		send_list_add(info);
++		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
++		send_list_add(bat_priv, info);
 +
 +		/* ... we're not the recipient (and thus need to forward). */
- 	} else if (!is_my_mac(info->packet.target_orig)) {
- 		send_list_add(info);
++	} else if (!is_my_mac(packet->target_orig)) {
++		send_list_add(bat_priv, info);
 +	}
++
 +end:
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
 +}
 +
 +/* Walk the originators and find the VIS server with the best tq. Set the packet
 + * address to its address and return the best_tq.
 + *
 + * Must be called with the originator hash locked */
- static int find_best_vis_server(struct vis_info *info)
++static int find_best_vis_server(struct bat_priv *bat_priv,
++				struct vis_info *info)
 +{
 +	HASHIT(hashit);
 +	struct orig_node *orig_node;
++	struct vis_packet *packet;
 +	int best_tq = -1;
 +
- 	while (hash_iterate(orig_hash, &hashit)) {
++	packet = (struct vis_packet *)info->skb_packet->data;
++
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +		orig_node = hashit.bucket->data;
- 		if ((orig_node != NULL) &&
- 		    (orig_node->router != NULL) &&
++		if ((orig_node) && (orig_node->router) &&
 +		    (orig_node->flags & VIS_SERVER) &&
 +		    (orig_node->router->tq_avg > best_tq)) {
 +			best_tq = orig_node->router->tq_avg;
- 			memcpy(info->packet.target_orig, orig_node->orig,
- 			       ETH_ALEN);
++			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
 +		}
 +	}
 +	return best_tq;
 +}
 +
 +/* Return true if the vis packet is full. */
 +static bool vis_packet_full(struct vis_info *info)
 +{
- 	if (info->packet.entries + 1 >
- 	    (1000 - sizeof(struct vis_info)) / sizeof(struct vis_info_entry))
++	struct vis_packet *packet;
++	packet = (struct vis_packet *)info->skb_packet->data;
++
++	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
++		< packet->entries + 1)
 +		return true;
 +	return false;
 +}
 +
 +/* generates a packet of own vis data,
 + * returns 0 on success, -1 if no packet could be generated */
 +static int generate_vis_packet(struct bat_priv *bat_priv)
 +{
 +	HASHIT(hashit_local);
 +	HASHIT(hashit_global);
 +	struct orig_node *orig_node;
- 	struct vis_info *info = (struct vis_info *)my_vis_info;
- 	struct vis_info_entry *entry, *entry_array;
++	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
++	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
++	struct vis_info_entry *entry;
 +	struct hna_local_entry *hna_local_entry;
 +	int best_tq = -1;
 +	unsigned long flags;
 +
 +	info->first_seen = jiffies;
- 	info->packet.vis_type = atomic_read(&bat_priv->vis_mode);
++	packet->vis_type = atomic_read(&bat_priv->vis_mode);
++
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
++	packet->ttl = TTL;
++	packet->seqno = htonl(ntohl(packet->seqno) + 1);
++	packet->entries = 0;
++	skb_trim(info->skb_packet, sizeof(struct vis_packet));
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
- 	memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
- 	info->packet.ttl = TTL;
- 	info->packet.seqno = htonl(ntohl(info->packet.seqno) + 1);
- 	info->packet.entries = 0;
++	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
++		best_tq = find_best_vis_server(bat_priv, info);
 +
- 	if (info->packet.vis_type == VIS_TYPE_CLIENT_UPDATE) {
- 		best_tq = find_best_vis_server(info);
 +		if (best_tq < 0) {
- 			spin_unlock_irqrestore(&orig_hash_lock, flags);
++			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
++					       flags);
 +			return -1;
 +		}
 +	}
 +
- 	entry_array = (struct vis_info_entry *)
- 		((char *)info + sizeof(struct vis_info));
- 
- 	while (hash_iterate(orig_hash, &hashit_global)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit_global)) {
 +		orig_node = hashit_global.bucket->data;
- 		if (orig_node->router != NULL
- 			&& compare_orig(orig_node->router->addr,
- 					orig_node->orig)
- 			&& (orig_node->router->if_incoming->if_status ==
- 								IF_ACTIVE)
- 		    && orig_node->router->tq_avg > 0) {
- 
- 			/* fill one entry into buffer. */
- 			entry = &entry_array[info->packet.entries];
- 			memcpy(entry->src,
- 			     orig_node->router->if_incoming->net_dev->dev_addr,
- 			       ETH_ALEN);
- 			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
- 			entry->quality = orig_node->router->tq_avg;
- 			info->packet.entries++;
- 
- 			if (vis_packet_full(info)) {
- 				spin_unlock_irqrestore(&orig_hash_lock, flags);
- 				return 0;
- 			}
++
++		if (!orig_node->router)
++			continue;
++
++		if (!compare_orig(orig_node->router->addr, orig_node->orig))
++			continue;
++
++		if (orig_node->router->if_incoming->if_status != IF_ACTIVE)
++			continue;
++
++		if (orig_node->router->tq_avg < 1)
++			continue;
++
++		/* fill one entry into buffer. */
++		entry = (struct vis_info_entry *)
++				skb_put(info->skb_packet, sizeof(*entry));
++		memcpy(entry->src,
++		       orig_node->router->if_incoming->net_dev->dev_addr,
++		       ETH_ALEN);
++		memcpy(entry->dest, orig_node->orig, ETH_ALEN);
++		entry->quality = orig_node->router->tq_avg;
++		packet->entries++;
++
++		if (vis_packet_full(info)) {
++			spin_unlock_irqrestore(
++					&bat_priv->orig_hash_lock, flags);
++			return 0;
 +		}
 +	}
 +
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
- 	spin_lock_irqsave(&hna_local_hash_lock, flags);
- 	while (hash_iterate(hna_local_hash, &hashit_local)) {
++	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
++	while (hash_iterate(bat_priv->hna_local_hash, &hashit_local)) {
 +		hna_local_entry = hashit_local.bucket->data;
- 		entry = &entry_array[info->packet.entries];
++		entry = (struct vis_info_entry *)skb_put(info->skb_packet,
++							 sizeof(*entry));
 +		memset(entry->src, 0, ETH_ALEN);
 +		memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
 +		entry->quality = 0; /* 0 means HNA */
- 		info->packet.entries++;
++		packet->entries++;
 +
 +		if (vis_packet_full(info)) {
- 			spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++			spin_unlock_irqrestore(&bat_priv->hna_lhash_lock,
++					       flags);
 +			return 0;
 +		}
 +	}
- 	spin_unlock_irqrestore(&hna_local_hash_lock, flags);
++
++	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
 +	return 0;
 +}
 +
 +/* free old vis packets. Must be called with this vis_hash_lock
 + * held */
- static void purge_vis_packets(void)
++static void purge_vis_packets(struct bat_priv *bat_priv)
 +{
 +	HASHIT(hashit);
 +	struct vis_info *info;
 +
- 	while (hash_iterate(vis_hash, &hashit)) {
++	while (hash_iterate(bat_priv->vis_hash, &hashit)) {
 +		info = hashit.bucket->data;
- 		if (info == my_vis_info)	/* never purge own data. */
++
++		/* never purge own data. */
++		if (info == bat_priv->my_vis_info)
 +			continue;
++
 +		if (time_after(jiffies,
 +			       info->first_seen + VIS_TIMEOUT * HZ)) {
- 			hash_remove_bucket(vis_hash, &hashit);
++			hash_remove_bucket(bat_priv->vis_hash, &hashit);
 +			send_list_del(info);
 +			kref_put(&info->refcount, free_info);
 +		}
 +	}
 +}
 +
- static void broadcast_vis_packet(struct vis_info *info, int packet_length)
++static void broadcast_vis_packet(struct bat_priv *bat_priv,
++				 struct vis_info *info)
 +{
 +	HASHIT(hashit);
 +	struct orig_node *orig_node;
++	struct vis_packet *packet;
++	struct sk_buff *skb;
 +	unsigned long flags;
 +	struct batman_if *batman_if;
 +	uint8_t dstaddr[ETH_ALEN];
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
++
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	packet = (struct vis_packet *)info->skb_packet->data;
 +
 +	/* send to all routers in range. */
- 	while (hash_iterate(orig_hash, &hashit)) {
++	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
 +		orig_node = hashit.bucket->data;
 +
 +		/* if it's a vis server and reachable, send it. */
 +		if ((!orig_node) || (!orig_node->router))
 +			continue;
 +		if (!(orig_node->flags & VIS_SERVER))
 +			continue;
 +		/* don't send it if we already received the packet from
 +		 * this node. */
- 		if (recv_list_is_in(&info->recv_list, orig_node->orig))
++		if (recv_list_is_in(bat_priv, &info->recv_list,
++							orig_node->orig))
 +			continue;
 +
- 		memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN);
++		memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
 +		batman_if = orig_node->router->if_incoming;
 +		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- 		spin_unlock_irqrestore(&orig_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +
- 		send_raw_packet((unsigned char *)&info->packet,
- 				packet_length, batman_if, dstaddr);
++		skb = skb_clone(info->skb_packet, GFP_ATOMIC);
++		if (skb)
++			send_skb_packet(skb, batman_if, dstaddr);
 +
- 		spin_lock_irqsave(&orig_hash_lock, flags);
++		spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
 +
 +	}
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
- 	memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
++
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +}
 +
- static void unicast_vis_packet(struct vis_info *info, int packet_length)
++static void unicast_vis_packet(struct bat_priv *bat_priv,
++			       struct vis_info *info)
 +{
 +	struct orig_node *orig_node;
++	struct sk_buff *skb;
++	struct vis_packet *packet;
 +	unsigned long flags;
 +	struct batman_if *batman_if;
 +	uint8_t dstaddr[ETH_ALEN];
 +
- 	spin_lock_irqsave(&orig_hash_lock, flags);
- 	orig_node = ((struct orig_node *)
- 		     hash_find(orig_hash, info->packet.target_orig));
++	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
++	packet = (struct vis_packet *)info->skb_packet->data;
++	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
++						   packet->target_orig));
 +
 +	if ((!orig_node) || (!orig_node->router))
 +		goto out;
 +
 +	/* don't lock while sending the packets ... we therefore
 +	 * copy the required data before sending */
 +	batman_if = orig_node->router->if_incoming;
 +	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
++
++	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
++	if (skb)
++		send_skb_packet(skb, batman_if, dstaddr);
 +
- 	send_raw_packet((unsigned char *)&info->packet,
- 			packet_length, batman_if, dstaddr);
 +	return;
 +
 +out:
- 	spin_unlock_irqrestore(&orig_hash_lock, flags);
++	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 +}
 +
 +/* only send one vis packet. called from send_vis_packets() */
- static void send_vis_packet(struct vis_info *info)
++static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
 +{
- 	int packet_length;
++	struct vis_packet *packet;
 +
- 	if (info->packet.ttl < 2) {
- 		pr_warning("Error - can't send vis packet: ttl exceeded\n");
++	packet = (struct vis_packet *)info->skb_packet->data;
++	if (packet->ttl < 2) {
++		pr_debug("Error - can't send vis packet: ttl exceeded\n");
 +		return;
 +	}
 +
- 	memcpy(info->packet.sender_orig, main_if_addr, ETH_ALEN);
- 	info->packet.ttl--;
- 
- 	packet_length = sizeof(struct vis_packet) +
- 		info->packet.entries * sizeof(struct vis_info_entry);
++	memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
++	       ETH_ALEN);
++	packet->ttl--;
 +
- 	if (is_bcast(info->packet.target_orig))
- 		broadcast_vis_packet(info, packet_length);
++	if (is_bcast(packet->target_orig))
++		broadcast_vis_packet(bat_priv, info);
 +	else
- 		unicast_vis_packet(info, packet_length);
- 	info->packet.ttl++; /* restore TTL */
++		unicast_vis_packet(bat_priv, info);
++	packet->ttl++; /* restore TTL */
 +}
 +
 +/* called from timer; send (and maybe generate) vis packet. */
 +static void send_vis_packets(struct work_struct *work)
 +{
++	struct delayed_work *delayed_work =
++		container_of(work, struct delayed_work, work);
++	struct bat_priv *bat_priv =
++		container_of(delayed_work, struct bat_priv, vis_work);
 +	struct vis_info *info, *temp;
 +	unsigned long flags;
- 	/* FIXME: each batman_if will be attached to a softif */
- 	struct bat_priv *bat_priv = netdev_priv(soft_device);
 +
- 	spin_lock_irqsave(&vis_hash_lock, flags);
- 
- 	purge_vis_packets();
++	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
++	purge_vis_packets(bat_priv);
 +
 +	if (generate_vis_packet(bat_priv) == 0) {
 +		/* schedule if generation was successful */
- 		send_list_add(my_vis_info);
++		send_list_add(bat_priv, bat_priv->my_vis_info);
 +	}
 +
- 	list_for_each_entry_safe(info, temp, &send_list, send_list) {
++	list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
++				 send_list) {
 +
 +		kref_get(&info->refcount);
- 		spin_unlock_irqrestore(&vis_hash_lock, flags);
++		spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
 +
- 		send_vis_packet(info);
++		if (bat_priv->primary_if)
++			send_vis_packet(bat_priv, info);
 +
- 		spin_lock_irqsave(&vis_hash_lock, flags);
++		spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
 +		send_list_del(info);
 +		kref_put(&info->refcount, free_info);
 +	}
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
- 	start_vis_timer();
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
++	start_vis_timer(bat_priv);
 +}
- static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets);
 +
 +/* init the vis server. this may only be called when if_list is already
 + * initialized (e.g. bat0 is initialized, interfaces have been added) */
- int vis_init(void)
++int vis_init(struct bat_priv *bat_priv)
 +{
++	struct vis_packet *packet;
 +	unsigned long flags;
- 	if (vis_hash)
++
++	if (bat_priv->vis_hash)
 +		return 1;
 +
- 	spin_lock_irqsave(&vis_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
 +
- 	vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
- 	if (!vis_hash) {
++	bat_priv->vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
++	if (!bat_priv->vis_hash) {
 +		pr_err("Can't initialize vis_hash\n");
 +		goto err;
 +	}
 +
- 	my_vis_info = kmalloc(1000, GFP_ATOMIC);
- 	if (!my_vis_info) {
++	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
++	if (!bat_priv->my_vis_info) {
 +		pr_err("Can't initialize vis packet\n");
 +		goto err;
 +	}
 +
++	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
++						sizeof(struct vis_packet) +
++						MAX_VIS_PACKET_SIZE +
++						sizeof(struct ethhdr));
++	if (!bat_priv->my_vis_info->skb_packet)
++		goto free_info;
++
++	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
++	packet = (struct vis_packet *)skb_put(
++					bat_priv->my_vis_info->skb_packet,
++					sizeof(struct vis_packet));
++
 +	/* prefill the vis info */
- 	my_vis_info->first_seen = jiffies - msecs_to_jiffies(VIS_INTERVAL);
- 	INIT_LIST_HEAD(&my_vis_info->recv_list);
- 	INIT_LIST_HEAD(&my_vis_info->send_list);
- 	kref_init(&my_vis_info->refcount);
- 	my_vis_info->packet.version = COMPAT_VERSION;
- 	my_vis_info->packet.packet_type = BAT_VIS;
- 	my_vis_info->packet.ttl = TTL;
- 	my_vis_info->packet.seqno = 0;
- 	my_vis_info->packet.entries = 0;
- 
- 	INIT_LIST_HEAD(&send_list);
- 
- 	memcpy(my_vis_info->packet.vis_orig, main_if_addr, ETH_ALEN);
- 	memcpy(my_vis_info->packet.sender_orig, main_if_addr, ETH_ALEN);
- 
- 	if (hash_add(vis_hash, my_vis_info) < 0) {
++	bat_priv->my_vis_info->first_seen = jiffies -
++						msecs_to_jiffies(VIS_INTERVAL);
++	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
++	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
++	kref_init(&bat_priv->my_vis_info->refcount);
++	bat_priv->my_vis_info->bat_priv = bat_priv;
++	packet->version = COMPAT_VERSION;
++	packet->packet_type = BAT_VIS;
++	packet->ttl = TTL;
++	packet->seqno = 0;
++	packet->entries = 0;
++
++	INIT_LIST_HEAD(&bat_priv->vis_send_list);
++
++	if (hash_add(bat_priv->vis_hash, bat_priv->my_vis_info) < 0) {
 +		pr_err("Can't add own vis packet into hash\n");
 +		/* not in hash, need to remove it manually. */
- 		kref_put(&my_vis_info->refcount, free_info);
++		kref_put(&bat_priv->my_vis_info->refcount, free_info);
 +		goto err;
 +	}
 +
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
- 	start_vis_timer();
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
++	start_vis_timer(bat_priv);
 +	return 1;
 +
++free_info:
++	kfree(bat_priv->my_vis_info);
++	bat_priv->my_vis_info = NULL;
 +err:
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
- 	vis_quit();
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
++	vis_quit(bat_priv);
 +	return 0;
 +}
 +
 +/* Decrease the reference count on a hash item info */
- static void free_info_ref(void *data)
++static void free_info_ref(void *data, void *arg)
 +{
 +	struct vis_info *info = data;
 +
 +	send_list_del(info);
 +	kref_put(&info->refcount, free_info);
 +}
 +
 +/* shutdown vis-server */
- void vis_quit(void)
++void vis_quit(struct bat_priv *bat_priv)
 +{
 +	unsigned long flags;
- 	if (!vis_hash)
++	if (!bat_priv->vis_hash)
 +		return;
 +
- 	cancel_delayed_work_sync(&vis_timer_wq);
++	cancel_delayed_work_sync(&bat_priv->vis_work);
 +
- 	spin_lock_irqsave(&vis_hash_lock, flags);
++	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
 +	/* properly remove, kill timers ... */
- 	hash_delete(vis_hash, free_info_ref);
- 	vis_hash = NULL;
- 	my_vis_info = NULL;
- 	spin_unlock_irqrestore(&vis_hash_lock, flags);
++	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
++	bat_priv->vis_hash = NULL;
++	bat_priv->my_vis_info = NULL;
++	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
 +}
 +
 +/* schedule packets for (re)transmission */
- static void start_vis_timer(void)
++static void start_vis_timer(struct bat_priv *bat_priv)
 +{
- 	queue_delayed_work(bat_event_workqueue, &vis_timer_wq,
- 			   (VIS_INTERVAL * HZ) / 1000);
++	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
++	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
++			   msecs_to_jiffies(VIS_INTERVAL));
 +}
diff --combined drivers/staging/batman-adv/vis.h
index bb13bf1,2c3b330..2c3b330
--- a/drivers/staging/batman-adv/vis.h
+++ b/drivers/staging/batman-adv/vis.h
@@@ -24,29 -24,6 +24,6 @@@
  
  #define VIS_TIMEOUT		200	/* timeout of vis packets in seconds */
  
- struct vis_info {
- 	unsigned long       first_seen;
- 	struct list_head    recv_list;
- 			    /* list of server-neighbors we received a vis-packet
- 			     * from.  we should not reply to them. */
- 	struct list_head send_list;
- 	struct kref refcount;
- 	/* this packet might be part of the vis send queue. */
- 	struct vis_packet packet;
- 	/* vis_info may follow here*/
- } __attribute__((packed));
- 
- struct vis_info_entry {
- 	uint8_t  src[ETH_ALEN];
- 	uint8_t  dest[ETH_ALEN];
- 	uint8_t  quality;	/* quality = 0 means HNA */
- } __attribute__((packed));
- 
- struct recvlist_node {
- 	struct list_head list;
- 	uint8_t mac[ETH_ALEN];
- };
- 
  int vis_seq_print_text(struct seq_file *seq, void *offset);
  void receive_server_sync_packet(struct bat_priv *bat_priv,
  				struct vis_packet *vis_packet,
@@@ -54,7 -31,7 +31,7 @@@
  void receive_client_update_packet(struct bat_priv *bat_priv,
  				  struct vis_packet *vis_packet,
  				  int vis_info_len);
- int vis_init(void);
- void vis_quit(void);
+ int vis_init(struct bat_priv *bat_priv);
+ void vis_quit(struct bat_priv *bat_priv);
  
  #endif /* _NET_BATMAN_ADV_VIS_H_ */

-- 
linux integration


More information about the commits mailing list