The following commit has been merged in the merge/master branch:

commit 8e28663f344c60da7b34bc27ac9076b73eee874d
Merge: 1b756056c6875c1c3e1c47128461fd8e496d73ab b82d11e243ec144c515e7d04e8e8c31b111a060d
Author: Sven Eckelmann <sven@narfation.org>
Date:   Sun Jun 17 10:52:17 2012 +0200
Merge tag 'v2012.2.0' into merge/master
    Conflicts:
	net/batman-adv/CHANGELOG
	net/batman-adv/Makefile
	net/batman-adv/Makefile.kbuild
	net/batman-adv/README
	net/batman-adv/README.external
	net/batman-adv/gen-compat-autoconf.sh
diff --combined Documentation/networking/batman-adv.txt
index 82c075f,75a5923..75a5923
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@@ -202,8 -202,7 +202,7 @@@ abled during run time. Following log_l
  2 - Enable messages related to route added / changed / deleted
  4 - Enable messages related to translation table operations
  8 - Enable messages related to bridge loop avoidance
- 16 - Enable messaged related to DAT, ARP snooping and parsing
- 31 - Enable all messages
+ 15 - enable all messages
The debug output can be changed at runtime using the file /sys/class/net/bat0/mesh/log_level. e.g. diff --combined net/batman-adv/Kconfig index 250e0b5,0000000..53f5244 mode 100644,000000..100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig @@@ -1,45 -1,0 +1,35 @@@ +# +# B.A.T.M.A.N meshing protocol +# + +config BATMAN_ADV + tristate "B.A.T.M.A.N. Advanced Meshing Protocol" + depends on NET + select CRC16 + default n + help + B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is + a routing protocol for multi-hop ad-hoc mesh networks. The + networks may be wired or wireless. See + http://www.open-mesh.org/ for more information and user space + tools. + +config BATMAN_ADV_BLA + bool "Bridge Loop Avoidance" + depends on BATMAN_ADV && INET + default y + help + This option enables BLA (Bridge Loop Avoidance), a mechanism + to avoid Ethernet frames looping when mesh nodes are connected + to both the same LAN and the same mesh. If you will never use + more than one mesh node in the same LAN, you can safely remove + this feature and save some space. + - config BATMAN_ADV_DAT - bool "Distributed ARP Table" - depends on BATMAN_ADV && INET - default n - help - This option enables DAT (Distributed ARP Table), a DHT based - mechanism that increases ARP reliability on sparse wireless - mesh networks. If you think that your network does not need - this option you can safely remove it and save some space. - +config BATMAN_ADV_DEBUG + bool "B.A.T.M.A.N. debugging" + depends on BATMAN_ADV + help + This is an option for use by developers; most people should + say N here. This enables compilation of support for + outputting debugging information to the kernel log. The + output is controlled via the module parameter debug. diff --combined net/batman-adv/Makefile index ad002cd,6d5c194..6d5c194 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@@ -24,7 -24,6 +24,6 @@@ batman-adv-y += bat_iv_ogm. batman-adv-y += bat_sysfs.o batman-adv-y += bitarray.o batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o - batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o batman-adv-y += gateway_client.o batman-adv-y += gateway_common.o batman-adv-y += hard-interface.o diff --combined net/batman-adv/bat_iv_ogm.c index 92fad91,0000000..dc53798 mode 100644,000000..100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -1,1265 -1,0 +1,1265 @@@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "translation-table.h" +#include "ring_buffer.h" +#include "originator.h" +#include "routing.h" +#include "gateway_common.h" +#include "gateway_client.h" +#include "hard-interface.h" +#include "send.h" +#include "bat_algo.h" + +static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface, + const uint8_t *neigh_addr, + struct orig_node *orig_node, + struct orig_node *orig_neigh, + uint32_t seqno) +{ + struct neigh_node *neigh_node; + + neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno); + if (!neigh_node) + goto out; + + INIT_LIST_HEAD(&neigh_node->bonding_list); + + neigh_node->orig_node = orig_neigh; + neigh_node->if_incoming = hard_iface; + + spin_lock_bh(&orig_node->neigh_list_lock); + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + spin_unlock_bh(&orig_node->neigh_list_lock); + +out: + return neigh_node; +} + +static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + uint32_t random_seqno; - int res = -ENOMEM; ++ int res = -1; + + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); + atomic_set(&hard_iface->seqno, random_seqno); + + hard_iface->packet_len = BATMAN_OGM_HLEN; + hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); + + if (!hard_iface->packet_buff) + goto out; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + batman_ogm_packet->header.packet_type = BAT_IV_OGM; + batman_ogm_packet->header.version = COMPAT_VERSION; + batman_ogm_packet->header.ttl = 2; + batman_ogm_packet->flags = NO_FLAGS; + batman_ogm_packet->tq = TQ_MAX_VALUE; + batman_ogm_packet->tt_num_changes = 0; + batman_ogm_packet->ttvn = 0; + + res = 0; + +out: + return res; +} + +static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface) +{ + kfree(hard_iface->packet_buff); + hard_iface->packet_buff = NULL; +} + +static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + memcpy(batman_ogm_packet->orig, + hard_iface->net_dev->dev_addr, ETH_ALEN); + memcpy(batman_ogm_packet->prev_sender, + hard_iface->net_dev->dev_addr, ETH_ALEN); +} + +static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; + batman_ogm_packet->header.ttl = TTL; +} + +/* when do we schedule our own ogm to be sent */ +static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv) +{ + return jiffies + msecs_to_jiffies( + atomic_read(&bat_priv->orig_interval) - + JITTER + (random32() % 2*JITTER)); +} + +/* when do we schedule a ogm packet to be sent */ +static unsigned long bat_iv_ogm_fwd_send_time(void) +{ + return jiffies + msecs_to_jiffies(random32() % (JITTER/2)); +} + +/* apply hop penalty for a normal link */ +static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) +{ + int hop_penalty = atomic_read(&bat_priv->hop_penalty); + return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE); +} + +/* is there another 
aggregated packet here? */ +static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, + int tt_num_changes) +{ + int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes); + + return (next_buff_pos <= packet_len) && + (next_buff_pos <= MAX_AGGREGATION_BYTES); +} + +/* send a batman ogm to a given interface */ +static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, + struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + char *fwd_str; + uint8_t packet_num; + int16_t buff_pos; + struct batman_ogm_packet *batman_ogm_packet; + struct sk_buff *skb; + + if (hard_iface->if_status != IF_ACTIVE) + return; + + packet_num = 0; + buff_pos = 0; + batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; + + /* adjust all flags and log packets */ + while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, + batman_ogm_packet->tt_num_changes)) { + + /* we might have aggregated direct link packets with an + * ordinary base packet */ + if ((forw_packet->direct_link_flags & (1 << packet_num)) && + (forw_packet->if_incoming == hard_iface)) + batman_ogm_packet->flags |= DIRECTLINK; + else + batman_ogm_packet->flags &= ~DIRECTLINK; + + fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? + "Sending own" : + "Forwarding")); + bat_dbg(DBG_BATMAN, bat_priv, + "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", + fwd_str, (packet_num > 0 ? "aggregated " : ""), + batman_ogm_packet->orig, + ntohl(batman_ogm_packet->seqno), + batman_ogm_packet->tq, batman_ogm_packet->header.ttl, + (batman_ogm_packet->flags & DIRECTLINK ? + "on" : "off"), + batman_ogm_packet->ttvn, hard_iface->net_dev->name, + hard_iface->net_dev->dev_addr); + + buff_pos += BATMAN_OGM_HLEN + + tt_len(batman_ogm_packet->tt_num_changes); + packet_num++; + batman_ogm_packet = (struct batman_ogm_packet *) + (forw_packet->skb->data + buff_pos); + } + + /* create clone because function is called more than once */ + skb = skb_clone(forw_packet->skb, GFP_ATOMIC); + if (skb) + send_skb_packet(skb, hard_iface, broadcast_addr); +} + +/* send a batman ogm packet */ +static void bat_iv_ogm_emit(struct forw_packet *forw_packet) +{ + struct hard_iface *hard_iface; + struct net_device *soft_iface; + struct bat_priv *bat_priv; + struct hard_iface *primary_if = NULL; + struct batman_ogm_packet *batman_ogm_packet; + unsigned char directlink; + + batman_ogm_packet = (struct batman_ogm_packet *) + (forw_packet->skb->data); + directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); + + if (!forw_packet->if_incoming) { + pr_err("Error - can't forward packet: incoming iface not specified\n"); + goto out; + } + + soft_iface = forw_packet->if_incoming->soft_iface; + bat_priv = netdev_priv(soft_iface); + + if (forw_packet->if_incoming->if_status != IF_ACTIVE) + goto out; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* multihomed peer assumed */ + /* non-primary OGMs are only broadcasted on their interface */ + if ((directlink && (batman_ogm_packet->header.ttl == 1)) || + (forw_packet->own && (forw_packet->if_incoming != primary_if))) { + + /* FIXME: what about aggregated packets ? */ + bat_dbg(DBG_BATMAN, bat_priv, + "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n", + (forw_packet->own ? 
"Sending own" : "Forwarding"), + batman_ogm_packet->orig, + ntohl(batman_ogm_packet->seqno), + batman_ogm_packet->header.ttl, + forw_packet->if_incoming->net_dev->name, + forw_packet->if_incoming->net_dev->dev_addr); + + /* skb is only used once and than forw_packet is free'd */ + send_skb_packet(forw_packet->skb, forw_packet->if_incoming, + broadcast_addr); + forw_packet->skb = NULL; + + goto out; + } + + /* broadcast on every interface */ + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + bat_iv_ogm_send_to_if(forw_packet, hard_iface); + } + rcu_read_unlock(); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +/* return true if new_packet can be aggregated with forw_packet */ +static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet + *new_batman_ogm_packet, + struct bat_priv *bat_priv, + int packet_len, unsigned long send_time, + bool directlink, + const struct hard_iface *if_incoming, + const struct forw_packet *forw_packet) +{ + struct batman_ogm_packet *batman_ogm_packet; + int aggregated_bytes = forw_packet->packet_len + packet_len; + struct hard_iface *primary_if = NULL; + bool res = false; + + batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; + + /** + * we can aggregate the current packet to this aggregated packet + * if: + * + * - the send time is within our MAX_AGGREGATION_MS time + * - the resulting packet wont be bigger than + * MAX_AGGREGATION_BYTES + */ + + if (time_before(send_time, forw_packet->send_time) && + time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS), + forw_packet->send_time) && + (aggregated_bytes <= MAX_AGGREGATION_BYTES)) { + + /** + * check aggregation compatibility + * -> direct link packets are broadcasted on + * their interface only + * -> aggregate packet if the current packet is + * a "global" packet as well as the base + * packet + */ + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* packets without direct link flag and high TTL + * are flooded through the net */ + if ((!directlink) && + (!(batman_ogm_packet->flags & DIRECTLINK)) && + (batman_ogm_packet->header.ttl != 1) && + + /* own packets originating non-primary + * interfaces leave only that interface */ + ((!forw_packet->own) || + (forw_packet->if_incoming == primary_if))) { + res = true; + goto out; + } + + /* if the incoming packet is sent via this one + * interface only - we still can aggregate */ + if ((directlink) && + (new_batman_ogm_packet->header.ttl == 1) && + (forw_packet->if_incoming == if_incoming) && + + /* packets from direct neighbors or + * own secondary interface packets + * (= secondary interface packets in general) */ + (batman_ogm_packet->flags & DIRECTLINK || + (forw_packet->own && + forw_packet->if_incoming != primary_if))) { + res = true; + goto out; + } + } + +out: + if (primary_if) + hardif_free_ref(primary_if); + return res; +} + +/* create a new aggregated packet and add this packet to it */ +static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, + int packet_len, unsigned long send_time, + bool direct_link, + struct hard_iface *if_incoming, + int own_packet) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct forw_packet *forw_packet_aggr; + unsigned char *skb_buff; + + if (!atomic_inc_not_zero(&if_incoming->refcount)) + return; + + /* own packet should always be scheduled */ + if (!own_packet) { + if 
(!atomic_dec_not_zero(&bat_priv->batman_queue_left)) { + bat_dbg(DBG_BATMAN, bat_priv, + "batman packet queue full\n"); + goto out; + } + } + + forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC); + if (!forw_packet_aggr) { + if (!own_packet) + atomic_inc(&bat_priv->batman_queue_left); + goto out; + } + + if ((atomic_read(&bat_priv->aggregated_ogms)) && + (packet_len < MAX_AGGREGATION_BYTES)) + forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + + ETH_HLEN); + else + forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN); + + if (!forw_packet_aggr->skb) { + if (!own_packet) + atomic_inc(&bat_priv->batman_queue_left); + kfree(forw_packet_aggr); + goto out; + } + skb_reserve(forw_packet_aggr->skb, ETH_HLEN); + + INIT_HLIST_NODE(&forw_packet_aggr->list); + + skb_buff = skb_put(forw_packet_aggr->skb, packet_len); + forw_packet_aggr->packet_len = packet_len; + memcpy(skb_buff, packet_buff, packet_len); + + forw_packet_aggr->own = own_packet; + forw_packet_aggr->if_incoming = if_incoming; + forw_packet_aggr->num_packets = 0; + forw_packet_aggr->direct_link_flags = NO_FLAGS; + forw_packet_aggr->send_time = send_time; + + /* save packet direct link flag status */ + if (direct_link) + forw_packet_aggr->direct_link_flags |= 1; + + /* add new packet to packet list */ + spin_lock_bh(&bat_priv->forw_bat_list_lock); + hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list); + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + + /* start timer for this packet */ + INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, + send_outstanding_bat_ogm_packet); + queue_delayed_work(bat_event_workqueue, + &forw_packet_aggr->delayed_work, + send_time - jiffies); + + return; +out: + hardif_free_ref(if_incoming); +} + +/* aggregate a new packet into the existing ogm packet */ +static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr, + const unsigned char *packet_buff, + int packet_len, bool direct_link) +{ + unsigned char *skb_buff; + + skb_buff = skb_put(forw_packet_aggr->skb, packet_len); + memcpy(skb_buff, packet_buff, packet_len); + forw_packet_aggr->packet_len += packet_len; + forw_packet_aggr->num_packets++; + + /* save packet direct link flag status */ + if (direct_link) + forw_packet_aggr->direct_link_flags |= + (1 << forw_packet_aggr->num_packets); +} + +static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, + unsigned char *packet_buff, + int packet_len, struct hard_iface *if_incoming, + int own_packet, unsigned long send_time) +{ + /** + * _aggr -> pointer to the packet we want to aggregate with + * _pos -> pointer to the position in the queue + */ + struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL; + struct hlist_node *tmp_node; + struct batman_ogm_packet *batman_ogm_packet; + bool direct_link; + + batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; + direct_link = batman_ogm_packet->flags & DIRECTLINK ? 
1 : 0; + + /* find position for the packet in the forward queue */ + spin_lock_bh(&bat_priv->forw_bat_list_lock); + /* own packets are not to be aggregated */ + if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { + hlist_for_each_entry(forw_packet_pos, tmp_node, + &bat_priv->forw_bat_list, list) { + if (bat_iv_ogm_can_aggregate(batman_ogm_packet, + bat_priv, packet_len, + send_time, direct_link, + if_incoming, + forw_packet_pos)) { + forw_packet_aggr = forw_packet_pos; + break; + } + } + } + + /* nothing to aggregate with - either aggregation disabled or no + * suitable aggregation packet found */ + if (!forw_packet_aggr) { + /* the following section can run without the lock */ + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + + /** + * if we could not aggregate this packet with one of the others + * we hold it back for a while, so that it might be aggregated + * later on + */ + if ((!own_packet) && + (atomic_read(&bat_priv->aggregated_ogms))) + send_time += msecs_to_jiffies(MAX_AGGREGATION_MS); + + bat_iv_ogm_aggregate_new(packet_buff, packet_len, + send_time, direct_link, + if_incoming, own_packet); + } else { + bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff, + packet_len, direct_link); + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + } +} + +static void bat_iv_ogm_forward(struct orig_node *orig_node, + const struct ethhdr *ethhdr, + struct batman_ogm_packet *batman_ogm_packet, + bool is_single_hop_neigh, + bool is_from_best_next_hop, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + uint8_t tt_num_changes; + + if (batman_ogm_packet->header.ttl <= 1) { + bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); + return; + } + + if (!is_from_best_next_hop) { + /* Mark the forwarded packet when it is not coming from our + * best next hop. We still need to forward the packet for our + * neighbor link quality detection to work in case the packet + * originated from a single hop neighbor. Otherwise we can + * simply drop the ogm. 
+ */ + if (is_single_hop_neigh) + batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP; + else + return; + } + + tt_num_changes = batman_ogm_packet->tt_num_changes; + + batman_ogm_packet->header.ttl--; + memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); + + /* apply hop penalty */ + batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: tq: %i, ttl: %i\n", + batman_ogm_packet->tq, batman_ogm_packet->header.ttl); + + batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); + batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); + + /* switch of primaries first hop flag when forwarding */ + batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; + if (is_single_hop_neigh) + batman_ogm_packet->flags |= DIRECTLINK; + else + batman_ogm_packet->flags &= ~DIRECTLINK; + + bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, + BATMAN_OGM_HLEN + tt_len(tt_num_changes), + if_incoming, 0, bat_iv_ogm_fwd_send_time()); +} + +static void bat_iv_ogm_schedule(struct hard_iface *hard_iface, + int tt_num_changes) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batman_ogm_packet *batman_ogm_packet; + struct hard_iface *primary_if; + int vis_server; + + vis_server = atomic_read(&bat_priv->vis_mode); + primary_if = primary_if_get_selected(bat_priv); + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + + /* change sequence number to network order */ + batman_ogm_packet->seqno = + htonl((uint32_t)atomic_read(&hard_iface->seqno)); + + batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); + batman_ogm_packet->tt_crc = htons((uint16_t) + atomic_read(&bat_priv->tt_crc)); + if (tt_num_changes >= 0) + batman_ogm_packet->tt_num_changes = tt_num_changes; + + if (vis_server == VIS_TYPE_SERVER_SYNC) + batman_ogm_packet->flags |= VIS_SERVER; + else + batman_ogm_packet->flags &= ~VIS_SERVER; + + if ((hard_iface == primary_if) && + (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) + batman_ogm_packet->gw_flags = + (uint8_t)atomic_read(&bat_priv->gw_bandwidth); + else + batman_ogm_packet->gw_flags = NO_FLAGS; + + atomic_inc(&hard_iface->seqno); + + slide_own_bcast_window(hard_iface); + bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, + hard_iface->packet_len, hard_iface, 1, + bat_iv_ogm_emit_send_time(bat_priv)); + + if (primary_if) + hardif_free_ref(primary_if); +} + +static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const struct ethhdr *ethhdr, + const struct batman_ogm_packet + *batman_ogm_packet, + struct hard_iface *if_incoming, + const unsigned char *tt_buff, + int is_duplicate) +{ + struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; + struct neigh_node *router = NULL; + struct orig_node *orig_node_tmp; + struct hlist_node *node; + uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; + + bat_dbg(DBG_BATMAN, bat_priv, + "update_originator(): Searching and updating originator entry of received packet\n"); + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_node->neigh_list, list) { + if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming) && + atomic_inc_not_zero(&tmp_neigh_node->refcount)) { + if (neigh_node) + neigh_node_free_ref(neigh_node); + neigh_node = tmp_neigh_node; + continue; + } + + if (is_duplicate) + continue; + + spin_lock_bh(&tmp_neigh_node->lq_update_lock); + ring_buffer_set(tmp_neigh_node->tq_recv, + 
&tmp_neigh_node->tq_index, 0); + tmp_neigh_node->tq_avg = + ring_buffer_avg(tmp_neigh_node->tq_recv); + spin_unlock_bh(&tmp_neigh_node->lq_update_lock); + } + + if (!neigh_node) { + struct orig_node *orig_tmp; + + orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); + if (!orig_tmp) + goto unlock; + + neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, + orig_node, orig_tmp, + batman_ogm_packet->seqno); + + orig_node_free_ref(orig_tmp); + if (!neigh_node) + goto unlock; + } else + bat_dbg(DBG_BATMAN, bat_priv, + "Updating existing last-hop neighbor of originator\n"); + + rcu_read_unlock(); + + orig_node->flags = batman_ogm_packet->flags; + neigh_node->last_seen = jiffies; + + spin_lock_bh(&neigh_node->lq_update_lock); + ring_buffer_set(neigh_node->tq_recv, + &neigh_node->tq_index, + batman_ogm_packet->tq); + neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); + spin_unlock_bh(&neigh_node->lq_update_lock); + + if (!is_duplicate) { + orig_node->last_ttl = batman_ogm_packet->header.ttl; + neigh_node->last_ttl = batman_ogm_packet->header.ttl; + } + + bonding_candidate_add(orig_node, neigh_node); + + /* if this neighbor already is our next hop there is nothing + * to change */ + router = orig_node_get_router(orig_node); + if (router == neigh_node) + goto update_tt; + + /* if this neighbor does not offer a better TQ we won't consider it */ + if (router && (router->tq_avg > neigh_node->tq_avg)) + goto update_tt; + + /* if the TQ is the same and the link not more symmetric we + * won't consider it either */ + if (router && (neigh_node->tq_avg == router->tq_avg)) { + orig_node_tmp = router->orig_node; + spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); + bcast_own_sum_orig = + orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); + + orig_node_tmp = neigh_node->orig_node; + spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); + bcast_own_sum_neigh = + orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); + + if (bcast_own_sum_orig >= bcast_own_sum_neigh) + goto update_tt; + } + + update_route(bat_priv, orig_node, neigh_node); + +update_tt: + /* I have to check for transtable changes only if the OGM has been + * sent through a primary interface */ + if (((batman_ogm_packet->orig != ethhdr->h_source) && + (batman_ogm_packet->header.ttl > 2)) || + (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) + tt_update_orig(bat_priv, orig_node, tt_buff, + batman_ogm_packet->tt_num_changes, + batman_ogm_packet->ttvn, + batman_ogm_packet->tt_crc); + + if (orig_node->gw_flags != batman_ogm_packet->gw_flags) + gw_node_update(bat_priv, orig_node, + batman_ogm_packet->gw_flags); + + orig_node->gw_flags = batman_ogm_packet->gw_flags; + + /* restart gateway selection if fast or late switching was enabled */ + if ((orig_node->gw_flags) && + (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && + (atomic_read(&bat_priv->gw_sel_class) > 2)) + gw_check_election(bat_priv, orig_node); + + goto out; + +unlock: + rcu_read_unlock(); +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (router) + neigh_node_free_ref(router); +} + +static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + struct batman_ogm_packet *batman_ogm_packet, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct neigh_node *neigh_node = NULL, *tmp_neigh_node; + struct hlist_node *node; + uint8_t total_count; + uint8_t orig_eq_count, neigh_rq_count, 
tq_own; + int tq_asym_penalty, ret = 0; + + /* find corresponding one hop neighbor */ + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_neigh_node->neigh_list, list) { + + if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) + continue; + + if (tmp_neigh_node->if_incoming != if_incoming) + continue; + + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) + continue; + + neigh_node = tmp_neigh_node; + break; + } + rcu_read_unlock(); + + if (!neigh_node) + neigh_node = bat_iv_ogm_neigh_new(if_incoming, + orig_neigh_node->orig, + orig_neigh_node, + orig_neigh_node, + batman_ogm_packet->seqno); + + if (!neigh_node) + goto out; + + /* if orig_node is direct neighbor update neigh_node last_seen */ + if (orig_node == orig_neigh_node) + neigh_node->last_seen = jiffies; + + orig_node->last_seen = jiffies; + + /* find packet count of corresponding one hop neighbor */ + spin_lock_bh(&orig_node->ogm_cnt_lock); + orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num]; + neigh_rq_count = neigh_node->real_packet_count; + spin_unlock_bh(&orig_node->ogm_cnt_lock); + + /* pay attention to not get a value bigger than 100 % */ + total_count = (orig_eq_count > neigh_rq_count ? + neigh_rq_count : orig_eq_count); + + /* if we have too few packets (too less data) we set tq_own to zero */ + /* if we receive too few packets it is not considered bidirectional */ + if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || + (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) + tq_own = 0; + else + /* neigh_node->real_packet_count is never zero as we + * only purge old information when getting new + * information */ + tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; + + /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does + * affect the nearly-symmetric links only a little, but + * punishes asymmetric links more. This will give a value + * between 0 and TQ_MAX_VALUE + */ + tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE * + (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) / + (TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE); + + batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own + * tq_asym_penalty) / + (TQ_MAX_VALUE * TQ_MAX_VALUE)); + + bat_dbg(DBG_BATMAN, bat_priv, + "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n", + orig_node->orig, orig_neigh_node->orig, total_count, + neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); + + /* if link has the minimum required transmission quality + * consider it bidirectional */ + if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) + ret = 1; + +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + return ret; +} + +/* processes a batman packet for all interfaces, adjusts the sequence number and + * finds out whether it is a duplicate. + * returns: + * 1 the packet is a duplicate + * 0 the packet has not yet been received + * -1 the packet is old and has been received while the seqno window + * was protected. Caller should drop it. 
+ */ +static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, + const struct batman_ogm_packet + *batman_ogm_packet, + const struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct orig_node *orig_node; + struct neigh_node *tmp_neigh_node; + struct hlist_node *node; + int is_duplicate = 0; + int32_t seq_diff; + int need_update = 0; + int set_mark, ret = -1; + + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); + if (!orig_node) + return 0; + + spin_lock_bh(&orig_node->ogm_cnt_lock); + seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; + + /* signalize caller that the packet is to be dropped. */ + if (!hlist_empty(&orig_node->neigh_list) && + window_protected(bat_priv, seq_diff, + &orig_node->batman_seqno_reset)) + goto out; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_node->neigh_list, list) { + + is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_ogm_packet->seqno); + + if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming)) + set_mark = 1; + else + set_mark = 0; + + /* if the window moved, set the update flag. */ + need_update |= bit_get_packet(bat_priv, + tmp_neigh_node->real_bits, + seq_diff, set_mark); + + tmp_neigh_node->real_packet_count = + bitmap_weight(tmp_neigh_node->real_bits, + TQ_LOCAL_WINDOW_SIZE); + } + rcu_read_unlock(); + + if (need_update) { + bat_dbg(DBG_BATMAN, bat_priv, + "updating last_seqno: old %u, new %u\n", + orig_node->last_real_seqno, batman_ogm_packet->seqno); + orig_node->last_real_seqno = batman_ogm_packet->seqno; + } + + ret = is_duplicate; + +out: + spin_unlock_bh(&orig_node->ogm_cnt_lock); + orig_node_free_ref(orig_node); + return ret; +} + +static void bat_iv_ogm_process(const struct ethhdr *ethhdr, + struct batman_ogm_packet *batman_ogm_packet, + const unsigned char *tt_buff, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct hard_iface *hard_iface; + struct orig_node *orig_neigh_node, *orig_node; + struct neigh_node *router = NULL, *router_router = NULL; + struct neigh_node *orig_neigh_router = NULL; + int has_directlink_flag; + int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; + int is_broadcast = 0, is_bidirectional; + bool is_single_hop_neigh = false; + bool is_from_best_next_hop = false; + int is_duplicate; + uint32_t if_incoming_seqno; + + /* Silently drop when the batman packet is actually not a + * correct packet. + * + * This might happen if a packet is padded (e.g. Ethernet has a + * minimum frame length of 64 byte) and the aggregation interprets + * it as an additional length. + * + * TODO: A more sane solution would be to have a bit in the + * batman_ogm_packet to detect whether the packet is the last + * packet in an aggregation. Here we expect that the padding + * is always zero (or not 0x01) + */ + if (batman_ogm_packet->header.packet_type != BAT_IV_OGM) + return; + + /* could be changed by schedule_own_packet() */ + if_incoming_seqno = atomic_read(&if_incoming->seqno); + + has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 
1 : 0); + + if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig)) + is_single_hop_neigh = true; + + bat_dbg(DBG_BATMAN, bat_priv, + "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", + ethhdr->h_source, if_incoming->net_dev->name, + if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, + batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, + batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc, + batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq, + batman_ogm_packet->header.ttl, + batman_ogm_packet->header.version, has_directlink_flag); + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->if_status != IF_ACTIVE) + continue; + + if (hard_iface->soft_iface != if_incoming->soft_iface) + continue; + + if (compare_eth(ethhdr->h_source, + hard_iface->net_dev->dev_addr)) + is_my_addr = 1; + + if (compare_eth(batman_ogm_packet->orig, + hard_iface->net_dev->dev_addr)) + is_my_orig = 1; + + if (compare_eth(batman_ogm_packet->prev_sender, + hard_iface->net_dev->dev_addr)) + is_my_oldorig = 1; + + if (is_broadcast_ether_addr(ethhdr->h_source)) + is_broadcast = 1; + } + rcu_read_unlock(); + + if (batman_ogm_packet->header.version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_ogm_packet->header.version); + return; + } + + if (is_my_addr) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: received my own broadcast (sender: %pM)\n", + ethhdr->h_source); + return; + } + + if (is_broadcast) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n", + ethhdr->h_source); + return; + } + + if (is_my_orig) { + unsigned long *word; + int offset; + + orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); + if (!orig_neigh_node) + return; + + /* neighbor has to indicate direct link and it has to + * come via the corresponding interface */ + /* save packet seqno for bidirectional check */ + if (has_directlink_flag && + compare_eth(if_incoming->net_dev->dev_addr, + batman_ogm_packet->orig)) { + offset = if_incoming->if_num * NUM_WORDS; + + spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); + word = &(orig_neigh_node->bcast_own[offset]); + bat_set_bit(word, + if_incoming_seqno - + batman_ogm_packet->seqno - 2); + orig_neigh_node->bcast_own_sum[if_incoming->if_num] = + bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); + spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); + } + + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: originator packet from myself (via neighbor)\n"); + orig_node_free_ref(orig_neigh_node); + return; + } + + if (is_my_oldorig) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n", + ethhdr->h_source); + return; + } + + if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", + ethhdr->h_source); + return; + } + + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); + if (!orig_node) + return; + + is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet, + if_incoming); + + if (is_duplicate == -1) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: packet within seqno protection time (sender: %pM)\n", + ethhdr->h_source); + goto out; + } + + if (batman_ogm_packet->tq == 0) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: originator packet with 
tq equal 0\n"); + goto out; + } + + router = orig_node_get_router(orig_node); + if (router) + router_router = orig_node_get_router(router->orig_node); + + if ((router && router->tq_avg != 0) && + (compare_eth(router->addr, ethhdr->h_source))) + is_from_best_next_hop = true; + + /* avoid temporary routing loops */ + if (router && router_router && + (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && + !(compare_eth(batman_ogm_packet->orig, + batman_ogm_packet->prev_sender)) && + (compare_eth(router->addr, router_router->addr))) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n", + ethhdr->h_source); + goto out; + } + + /* if sender is a direct neighbor the sender mac equals + * originator mac */ + orig_neigh_node = (is_single_hop_neigh ? + orig_node : + get_orig_node(bat_priv, ethhdr->h_source)); + if (!orig_neigh_node) + goto out; + + orig_neigh_router = orig_node_get_router(orig_neigh_node); + + /* drop packet if sender is not a direct neighbor and if we + * don't route towards it */ + if (!is_single_hop_neigh && (!orig_neigh_router)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: OGM via unknown neighbor!\n"); + goto out_neigh; + } + + is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node, + batman_ogm_packet, if_incoming); + + bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); + + /* update ranking if it is not a duplicate or has the same + * seqno and similar ttl as the non-duplicate */ + if (is_bidirectional && + (!is_duplicate || + ((orig_node->last_real_seqno == batman_ogm_packet->seqno) && + (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl)))) + bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, + batman_ogm_packet, if_incoming, + tt_buff, is_duplicate); + + /* is single hop (direct) neighbor */ + if (is_single_hop_neigh) { + + /* mark direct link on incoming interface */ + bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); + goto out_neigh; + } + + /* multihop originator */ + if (!is_bidirectional) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: not received via bidirectional link\n"); + goto out_neigh; + } + + if (is_duplicate) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: duplicate packet received\n"); + goto out_neigh; + } + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: rebroadcast originator packet\n"); + bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, + is_single_hop_neigh, is_from_best_next_hop, + if_incoming); + +out_neigh: + if ((orig_neigh_node) && (!is_single_hop_neigh)) + orig_node_free_ref(orig_neigh_node); +out: + if (router) + neigh_node_free_ref(router); + if (router_router) + neigh_node_free_ref(router_router); + if (orig_neigh_router) + neigh_node_free_ref(orig_neigh_router); + + orig_node_free_ref(orig_node); +} + +static int bat_iv_ogm_receive(struct sk_buff *skb, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batman_ogm_packet *batman_ogm_packet; + struct ethhdr *ethhdr; + int buff_pos = 0, packet_len; + unsigned char *tt_buff, *packet_buff; + bool ret; + + ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); + if (!ret) + return NET_RX_DROP; + + /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface + * that does not have B.A.T.M.A.N. IV enabled ? 
+ */ + if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit) + return NET_RX_DROP; + + packet_len = skb_headlen(skb); + ethhdr = (struct ethhdr *)skb_mac_header(skb); + packet_buff = skb->data; + batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; + + /* unpack the aggregated packets and process them one by one */ + do { + /* network to host order for our 32bit seqno and the + orig_interval */ + batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); + batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); + + tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN; + + bat_iv_ogm_process(ethhdr, batman_ogm_packet, + tt_buff, if_incoming); + + buff_pos += BATMAN_OGM_HLEN + + tt_len(batman_ogm_packet->tt_num_changes); + + batman_ogm_packet = (struct batman_ogm_packet *) + (packet_buff + buff_pos); + } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, + batman_ogm_packet->tt_num_changes)); + + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +static struct bat_algo_ops batman_iv __read_mostly = { + .name = "BATMAN IV", + .bat_iface_enable = bat_iv_ogm_iface_enable, + .bat_iface_disable = bat_iv_ogm_iface_disable, + .bat_iface_update_mac = bat_iv_ogm_iface_update_mac, + .bat_primary_iface_set = bat_iv_ogm_primary_iface_set, + .bat_ogm_schedule = bat_iv_ogm_schedule, + .bat_ogm_emit = bat_iv_ogm_emit, +}; + +int __init bat_iv_init(void) +{ + int ret; + + /* batman originator packet */ + ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); + if (ret < 0) + goto out; + + ret = bat_algo_register(&batman_iv); + if (ret < 0) + goto handler_unregister; + + goto out; + +handler_unregister: + recv_handler_unregister(BAT_IV_OGM); +out: + return ret; +} diff --combined net/batman-adv/bat_sysfs.c index acb2640,5bc7b66..5bc7b66 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@@ -445,7 -445,7 +445,7 @@@ BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUG static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, store_gw_bwidth); #ifdef CONFIG_BATMAN_ADV_DEBUG - BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 31, NULL); + BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL); #endif
static struct bat_attribute *mesh_attrs[] = { diff --combined net/batman-adv/hard-interface.c index 843caa7,0000000..dc334fa mode 100644,000000..100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@@ -1,581 -1,0 +1,580 @@@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" - #include "distributed-arp-table.h" +#include "hard-interface.h" +#include "soft-interface.h" +#include "send.h" +#include "translation-table.h" +#include "routing.h" +#include "bat_sysfs.h" +#include "originator.h" +#include "hash.h" +#include "bridge_loop_avoidance.h" + +#include <linux/if_arp.h> + +void hardif_free_rcu(struct rcu_head *rcu) +{ + struct hard_iface *hard_iface; + + hard_iface = container_of(rcu, struct hard_iface, rcu); + dev_put(hard_iface->net_dev); + kfree(hard_iface); +} + +struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev) +{ + struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->net_dev == net_dev && + atomic_inc_not_zero(&hard_iface->refcount)) + goto out; + } + + hard_iface = NULL; + +out: + rcu_read_unlock(); + return hard_iface; +} + +static int is_valid_iface(const struct net_device *net_dev) +{ + if (net_dev->flags & IFF_LOOPBACK) + return 0; + + if (net_dev->type != ARPHRD_ETHER) + return 0; + + if (net_dev->addr_len != ETH_ALEN) + return 0; + + /* no batman over batman */ + if (softif_is_valid(net_dev)) + return 0; + + /* Device is being bridged */ + /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) + return 0; */ + + return 1; +} + +static struct hard_iface *hardif_get_active(const struct net_device *soft_iface) +{ + struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + if (hard_iface->if_status == IF_ACTIVE && + atomic_inc_not_zero(&hard_iface->refcount)) + goto out; + } + + hard_iface = NULL; + +out: + rcu_read_unlock(); + return hard_iface; +} + +static void primary_if_update_addr(struct bat_priv *bat_priv, + struct hard_iface *oldif) +{ + struct vis_packet *vis_packet; + struct hard_iface *primary_if; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + - dat_init_own_dht_addr(bat_priv, primary_if); - + vis_packet = (struct vis_packet *) + bat_priv->my_vis_info->skb_packet->data; + memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); + memcpy(vis_packet->sender_orig, + primary_if->net_dev->dev_addr, ETH_ALEN); + + bla_update_orig_address(bat_priv, primary_if, oldif); +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +static void primary_if_select(struct bat_priv *bat_priv, + struct hard_iface *new_hard_iface) +{ + struct hard_iface 
*curr_hard_iface; + + ASSERT_RTNL(); + + if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount)) + new_hard_iface = NULL; + + curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); + rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); + + if (!new_hard_iface) + goto out; + + bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface); + primary_if_update_addr(bat_priv, curr_hard_iface); + +out: + if (curr_hard_iface) + hardif_free_ref(curr_hard_iface); +} + +static bool hardif_is_iface_up(const struct hard_iface *hard_iface) +{ + if (hard_iface->net_dev->flags & IFF_UP) + return true; + + return false; +} + +static void check_known_mac_addr(const struct net_device *net_dev) +{ + const struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if ((hard_iface->if_status != IF_ACTIVE) && + (hard_iface->if_status != IF_TO_BE_ACTIVATED)) + continue; + + if (hard_iface->net_dev == net_dev) + continue; + + if (!compare_eth(hard_iface->net_dev->dev_addr, + net_dev->dev_addr)) + continue; + + pr_warn("The newly added mac address (%pM) already exists on: %s\n", + net_dev->dev_addr, hard_iface->net_dev->name); + pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); + } + rcu_read_unlock(); +} + +int hardif_min_mtu(struct net_device *soft_iface) +{ + const struct bat_priv *bat_priv = netdev_priv(soft_iface); + const struct hard_iface *hard_iface; + /* allow big frames if all devices are capable to do so + * (have MTU > 1500 + BAT_HEADER_LEN) */ + int min_mtu = ETH_DATA_LEN; + + if (atomic_read(&bat_priv->fragmentation)) + goto out; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if ((hard_iface->if_status != IF_ACTIVE) && + (hard_iface->if_status != IF_TO_BE_ACTIVATED)) + continue; + + if (hard_iface->soft_iface != soft_iface) + continue; + + min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN, + min_mtu); + } + rcu_read_unlock(); +out: + return min_mtu; +} + +/* adjusts the MTU if a new interface with a smaller MTU appeared. 
*/ +void update_min_mtu(struct net_device *soft_iface) +{ + int min_mtu; + + min_mtu = hardif_min_mtu(soft_iface); + if (soft_iface->mtu != min_mtu) + soft_iface->mtu = min_mtu; +} + +static void hardif_activate_interface(struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv; + struct hard_iface *primary_if = NULL; + + if (hard_iface->if_status != IF_INACTIVE) + goto out; + + bat_priv = netdev_priv(hard_iface->soft_iface); + + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); + hard_iface->if_status = IF_TO_BE_ACTIVATED; + + /** + * the first active interface becomes our primary interface or + * the next active interface after the old primary interface was removed + */ + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + primary_if_select(bat_priv, hard_iface); + + bat_info(hard_iface->soft_iface, "Interface activated: %s\n", + hard_iface->net_dev->name); + + update_min_mtu(hard_iface->soft_iface); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +static void hardif_deactivate_interface(struct hard_iface *hard_iface) +{ + if ((hard_iface->if_status != IF_ACTIVE) && + (hard_iface->if_status != IF_TO_BE_ACTIVATED)) + return; + + hard_iface->if_status = IF_INACTIVE; + + bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n", + hard_iface->net_dev->name); + + update_min_mtu(hard_iface->soft_iface); +} + +int hardif_enable_interface(struct hard_iface *hard_iface, + const char *iface_name) +{ + struct bat_priv *bat_priv; + struct net_device *soft_iface; + int ret; + + if (hard_iface->if_status != IF_NOT_IN_USE) + goto out; + + if (!atomic_inc_not_zero(&hard_iface->refcount)) + goto out; + + /* hard-interface is part of a bridge */ + if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT) + pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n", + hard_iface->net_dev->name); + + soft_iface = dev_get_by_name(&init_net, iface_name); + + if (!soft_iface) { + soft_iface = softif_create(iface_name); + + if (!soft_iface) { + ret = -ENOMEM; + goto err; + } + + /* dev_get_by_name() increases the reference counter for us */ + dev_hold(soft_iface); + } + + if (!softif_is_valid(soft_iface)) { + pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", + soft_iface->name); + ret = -EINVAL; + goto err_dev; + } + + hard_iface->soft_iface = soft_iface; + bat_priv = netdev_priv(hard_iface->soft_iface); + + ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); - if (ret < 0) ++ if (ret < 0) { ++ ret = -ENOMEM; + goto err_dev; ++ } + + hard_iface->if_num = bat_priv->num_ifaces; + bat_priv->num_ifaces++; + hard_iface->if_status = IF_INACTIVE; + orig_hash_add_if(hard_iface, bat_priv->num_ifaces); + + hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); + hard_iface->batman_adv_ptype.func = batman_skb_recv; + hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; + dev_add_pack(&hard_iface->batman_adv_ptype); + + atomic_set(&hard_iface->frag_seqno, 1); + bat_info(hard_iface->soft_iface, "Adding interface: %s\n", + hard_iface->net_dev->name); + + if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < + ETH_DATA_LEN + BAT_HEADER_LEN) + bat_info(hard_iface->soft_iface, + "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. 
Setting the MTU to %zi would solve the problem.\n", + hard_iface->net_dev->name, hard_iface->net_dev->mtu, + ETH_DATA_LEN + BAT_HEADER_LEN); + + if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < + ETH_DATA_LEN + BAT_HEADER_LEN) + bat_info(hard_iface->soft_iface, + "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n", + hard_iface->net_dev->name, hard_iface->net_dev->mtu, + ETH_DATA_LEN + BAT_HEADER_LEN); + + if (hardif_is_iface_up(hard_iface)) + hardif_activate_interface(hard_iface); + else + bat_err(hard_iface->soft_iface, + "Not using interface %s (retrying later): interface not active\n", + hard_iface->net_dev->name); + + /* begin scheduling originator messages on that interface */ + schedule_bat_ogm(hard_iface); + +out: + return 0; + +err_dev: + dev_put(soft_iface); +err: + hardif_free_ref(hard_iface); + return ret; +} + +void hardif_disable_interface(struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct hard_iface *primary_if = NULL; + + if (hard_iface->if_status == IF_ACTIVE) + hardif_deactivate_interface(hard_iface); + + if (hard_iface->if_status != IF_INACTIVE) + goto out; + + bat_info(hard_iface->soft_iface, "Removing interface: %s\n", + hard_iface->net_dev->name); + dev_remove_pack(&hard_iface->batman_adv_ptype); + + bat_priv->num_ifaces--; + orig_hash_del_if(hard_iface, bat_priv->num_ifaces); + + primary_if = primary_if_get_selected(bat_priv); + if (hard_iface == primary_if) { + struct hard_iface *new_if; + + new_if = hardif_get_active(hard_iface->soft_iface); + primary_if_select(bat_priv, new_if); + + if (new_if) + hardif_free_ref(new_if); + } + + bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); + hard_iface->if_status = IF_NOT_IN_USE; + + /* delete all references to this hard_iface */ + purge_orig_ref(bat_priv); + purge_outstanding_packets(bat_priv, hard_iface); + dev_put(hard_iface->soft_iface); + + /* nobody uses this interface anymore */ + if (!bat_priv->num_ifaces) + softif_destroy(hard_iface->soft_iface); + + hard_iface->soft_iface = NULL; + hardif_free_ref(hard_iface); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +static struct hard_iface *hardif_add_interface(struct net_device *net_dev) +{ + struct hard_iface *hard_iface; + int ret; + + ASSERT_RTNL(); + + ret = is_valid_iface(net_dev); + if (ret != 1) + goto out; + + dev_hold(net_dev); + + hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC); + if (!hard_iface) + goto release_dev; + + ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); + if (ret) + goto free_if; + + hard_iface->if_num = -1; + hard_iface->net_dev = net_dev; + hard_iface->soft_iface = NULL; + hard_iface->if_status = IF_NOT_IN_USE; + INIT_LIST_HEAD(&hard_iface->list); + /* extra reference for return */ + atomic_set(&hard_iface->refcount, 2); + + check_known_mac_addr(hard_iface->net_dev); + list_add_tail_rcu(&hard_iface->list, &hardif_list); + + /** + * This can't be called via a bat_priv callback because + * we have no bat_priv yet. 
+ */ + atomic_set(&hard_iface->seqno, 1); + hard_iface->packet_buff = NULL; + + return hard_iface; + +free_if: + kfree(hard_iface); +release_dev: + dev_put(net_dev); +out: + return NULL; +} + +static void hardif_remove_interface(struct hard_iface *hard_iface) +{ + ASSERT_RTNL(); + + /* first deactivate interface */ + if (hard_iface->if_status != IF_NOT_IN_USE) + hardif_disable_interface(hard_iface); + + if (hard_iface->if_status != IF_NOT_IN_USE) + return; + + hard_iface->if_status = IF_TO_BE_REMOVED; + sysfs_del_hardif(&hard_iface->hardif_obj); + hardif_free_ref(hard_iface); +} + +void hardif_remove_interfaces(void) +{ + struct hard_iface *hard_iface, *hard_iface_tmp; + + rtnl_lock(); + list_for_each_entry_safe(hard_iface, hard_iface_tmp, + &hardif_list, list) { + list_del_rcu(&hard_iface->list); + hardif_remove_interface(hard_iface); + } + rtnl_unlock(); +} + +static int hard_if_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *net_dev = ptr; + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); + struct hard_iface *primary_if = NULL; + struct bat_priv *bat_priv; + + if (!hard_iface && event == NETDEV_REGISTER) + hard_iface = hardif_add_interface(net_dev); + + if (!hard_iface) + goto out; + + switch (event) { + case NETDEV_UP: + hardif_activate_interface(hard_iface); + break; + case NETDEV_GOING_DOWN: + case NETDEV_DOWN: + hardif_deactivate_interface(hard_iface); + break; + case NETDEV_UNREGISTER: + list_del_rcu(&hard_iface->list); + + hardif_remove_interface(hard_iface); + break; + case NETDEV_CHANGEMTU: + if (hard_iface->soft_iface) + update_min_mtu(hard_iface->soft_iface); + break; + case NETDEV_CHANGEADDR: + if (hard_iface->if_status == IF_NOT_IN_USE) + goto hardif_put; + + check_known_mac_addr(hard_iface->net_dev); + + bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto hardif_put; + + if (hard_iface == primary_if) + primary_if_update_addr(bat_priv, NULL); + break; + default: + break; + } + +hardif_put: + hardif_free_ref(hard_iface); +out: + if (primary_if) + hardif_free_ref(primary_if); + return NOTIFY_DONE; +} + +/* This function returns true if the interface represented by ifindex is a + * 802.11 wireless device */ +bool is_wifi_iface(int ifindex) +{ + struct net_device *net_device = NULL; + bool ret = false; + + if (ifindex == NULL_IFINDEX) + goto out; + + net_device = dev_get_by_index(&init_net, ifindex); + if (!net_device) + goto out; + +#ifdef CONFIG_WIRELESS_EXT + /* pre-cfg80211 drivers have to implement WEXT, so it is possible to + * check for wireless_handlers != NULL */ + if (net_device->wireless_handlers) + ret = true; + else +#endif + /* cfg80211 drivers have to set ieee80211_ptr */ + if (net_device->ieee80211_ptr) + ret = true; +out: + if (net_device) + dev_put(net_device); + return ret; +} + +struct notifier_block hard_if_notifier = { + .notifier_call = hard_if_event, +}; diff --combined net/batman-adv/main.c index 9137aa0,083a299..083a299 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@@ -268,8 -268,6 +268,6 @@@ static void recv_handler_init(void
/* batman icmp packet */ recv_packet_handler[BAT_ICMP] = recv_icmp_packet; - /* unicast with 4 addresses packet */ - recv_packet_handler[BAT_UNICAST_4ADDR] = recv_unicast_packet; /* unicast packet */ recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; /* fragmented unicast packet */ diff --combined net/batman-adv/main.h index 4d0326a,0000000..f4a3ec0 mode 100644,000000..100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@@ -1,258 -1,0 +1,245 @@@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_MAIN_H_ +#define _NET_BATMAN_ADV_MAIN_H_ + +#define DRIVER_AUTHOR "Marek Lindner lindner_marek@yahoo.de, " \ + "Simon Wunderlich siwu@hrz.tu-chemnitz.de" +#define DRIVER_DESC "B.A.T.M.A.N. advanced" +#define DRIVER_DEVICE "batman-adv" + +#ifndef SOURCE_VERSION +#define SOURCE_VERSION "2012.2.0" +#endif + +/* B.A.T.M.A.N. parameters */ + +#define TQ_MAX_VALUE 255 +#define JITTER 20 + + /* Time To Live of broadcast messages */ +#define TTL 50 + +/* purge originators after time in seconds if no valid packet comes in + * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ +#define PURGE_TIMEOUT 200000 /* 200 seconds */ +#define TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */ +#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */ +/* sliding packet range of received originator messages in sequence numbers + * (should be a multiple of our word size) */ +#define TQ_LOCAL_WINDOW_SIZE 64 +#define TT_REQUEST_TIMEOUT 3000 /* milliseconds we have to keep + * pending tt_req */ + +#define TQ_GLOBAL_WINDOW_SIZE 5 +#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 +#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 +#define TQ_TOTAL_BIDRECT_LIMIT 1 + +#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ + +#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most + * ROAMING_MAX_COUNT times in milliseconds */ +#define ROAMING_MAX_COUNT 5 + +#define NO_FLAGS 0 + +#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ + +#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE) + - /* msecs after which an ARP_REQUEST is sent in broadcast as fallback */ - #define ARP_REQ_DELAY 250 - /* numbers of originator to contact for any PUT/GET DHT operation */ - #define DHT_CANDIDATES_NUM 3 - /* Factor which default ARP timeout values of the soft_iface table are - * multiplied by - */ - #define ARP_TIMEOUT_FACTOR 4 - +#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ + +#define VIS_INTERVAL 5000 /* 5 seconds */ + +/* how much worse secondary interfaces may be to be considered as bonding + * candidates */ +#define BONDING_TQ_THRESHOLD 50 + +/* should not be bigger than 512 bytes or change the size of + * forw_packet->direct_link_flags */ +#define MAX_AGGREGATION_BYTES 512 +#define MAX_AGGREGATION_MS 100 + +#define BLA_PERIOD_LENGTH
10000 /* 10 seconds */ +#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3) +#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10) + +#define DUPLIST_SIZE 16 +#define DUPLIST_TIMEOUT 500 /* 500 ms */ +/* don't reset again within 30 seconds */ +#define RESET_PROTECTION_MS 30000 +#define EXPECTED_SEQNO_RANGE 65536 + +enum mesh_state { + MESH_INACTIVE, + MESH_ACTIVE, + MESH_DEACTIVATING +}; + +#define BCAST_QUEUE_LEN 256 +#define BATMAN_QUEUE_LEN 256 + +enum uev_action { + UEV_ADD = 0, + UEV_DEL, + UEV_CHANGE +}; + +enum uev_type { + UEV_GW = 0 +}; + +#define GW_THRESHOLD 50 + - #define DHT_CANDIDATE_NOT_FOUND 0 - #define DHT_CANDIDATE_ORIG 1 - +/* Debug Messages */ +#ifdef pr_fmt +#undef pr_fmt +#endif +/* Append 'batman-adv: ' before kernel messages */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* all messages related to routing / flooding / broadcasting / etc */ +enum dbg_level { + DBG_BATMAN = 1 << 0, + DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ + DBG_TT = 1 << 2, /* translation table operations */ + DBG_BLA = 1 << 3, /* bridge loop avoidance */ - DBG_DAT = 1 << 4, /* snooped arp messages / dat operations */ - DBG_ALL = 31 ++ DBG_ALL = 15 +}; + +/* Kernel headers */ + +#include <linux/mutex.h> /* mutex */ +#include <linux/module.h> /* needed by all modules */ +#include <linux/netdevice.h> /* netdevice */ +#include <linux/etherdevice.h> /* ethernet address classification */ +#include <linux/if_ether.h> /* ethernet header */ +#include <linux/poll.h> /* poll_table */ +#include <linux/kthread.h> /* kernel threads */ +#include <linux/pkt_sched.h> /* schedule types */ +#include <linux/workqueue.h> /* workqueue */ +#include <linux/slab.h> +#include <net/sock.h> /* struct sock */ +#include <linux/jiffies.h> +#include <linux/seq_file.h> +#include "types.h" + +extern char bat_routing_algo[]; +extern struct list_head hardif_list; + +extern unsigned char broadcast_addr[]; +extern struct workqueue_struct *bat_event_workqueue; + +int mesh_init(struct net_device *soft_iface); +void mesh_free(struct net_device *soft_iface); +void inc_module_count(void); +void dec_module_count(void); +int is_my_mac(const uint8_t *addr); +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev); +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)); +void recv_handler_unregister(uint8_t packet_type); +int bat_algo_register(struct bat_algo_ops *bat_algo_ops); +int bat_algo_select(struct bat_priv *bat_priv, char *name); +int bat_algo_seq_print_text(struct seq_file *seq, void *offset); + +#ifdef CONFIG_BATMAN_ADV_DEBUG +int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); + +#define bat_dbg(type, bat_priv, fmt, arg...) \ + do { \ + if (atomic_read(&bat_priv->log_level) & type) \ + debug_log(bat_priv, fmt, ## arg); \ + } \ + while (0) +#else /* !CONFIG_BATMAN_ADV_DEBUG */ +__printf(3, 4) +static inline void bat_dbg(int type __always_unused, + struct bat_priv *bat_priv __always_unused, + const char *fmt __always_unused, ...) +{ +} +#endif + +#define bat_info(net_dev, fmt, arg...) \ + do { \ + struct net_device *_netdev = (net_dev); \ + struct bat_priv *_batpriv = netdev_priv(_netdev); \ + bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \ + pr_info("%s: " fmt, _netdev->name, ## arg); \ + } while (0) +#define bat_err(net_dev, fmt, arg...) 
\ + do { \ + struct net_device *_netdev = (net_dev); \ + struct bat_priv *_batpriv = netdev_priv(_netdev); \ + bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \ + pr_err("%s: " fmt, _netdev->name, ## arg); \ + } while (0) + +/** + * returns 1 if they are the same ethernet addr + * + * note: can't use compare_ether_addr() as it requires aligned memory + */ + +static inline int compare_eth(const void *data1, const void *data2) +{ + return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); +} + +/** + * has_timed_out - compares current time (jiffies) and timestamp + timeout + * @timestamp: base value to compare with (in jiffies) + * @timeout: added to base value before comparing (in milliseconds) + * + * Returns true if current time is after timestamp + timeout + */ +static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout) +{ + return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout)); +} + +#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) + +/* Returns the smallest signed integer in two's complement with the sizeof x */ +#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u))) + +/* Checks if a sequence number x is a predecessor/successor of y. + * These macros handle overflows/underflows and can correctly check for a + * predecessor/successor unless the variable sequence number has grown by + * more than 2**(bitwidth(x)-1)-1. + * This means that for a uint8_t with the maximum value 255, it would think: + * - when adding nothing - it is neither a predecessor nor a successor + * - before adding more than 127 to the starting value - it is a predecessor, + * - when adding 128 - it is neither a predecessor nor a successor, + * - after adding more than 127 to the starting value - it is a successor */ +#define seq_before(x, y) ({typeof(x) _d1 = (x); \ + typeof(y) _d2 = (y); \ + typeof(x) _dummy = (_d1 - _d2); \ + (void) (&_d1 == &_d2); \ + _dummy > smallest_signed_int(_dummy); }) +#define seq_after(x, y) seq_before(y, x) + +#endif /* _NET_BATMAN_ADV_MAIN_H_ */ diff --combined net/batman-adv/originator.c index c6a00b3,41147942..41147942 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@@ -20,7 -20,6 +20,6 @@@ */
#include "main.h" - #include "distributed-arp-table.h" #include "originator.h" #include "hash.h" #include "translation-table.h" @@@ -221,7 -220,6 +220,6 @@@ struct orig_node *get_orig_node(struct orig_node->tt_poss_change = false; orig_node->bat_priv = bat_priv; memcpy(orig_node->orig, addr, ETH_ALEN); - dat_init_orig_node_dht_addr(orig_node); orig_node->router = NULL; orig_node->tt_crc = 0; atomic_set(&orig_node->last_ttvn, 0); diff --combined net/batman-adv/packet.h index 307dbb3,0ee1af7..0ee1af7 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@@ -25,22 -25,14 +25,14 @@@ #define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
enum bat_packettype { - BAT_IV_OGM = 0x01, - BAT_ICMP = 0x02, - BAT_UNICAST = 0x03, - BAT_BCAST = 0x04, - BAT_VIS = 0x05, - BAT_UNICAST_FRAG = 0x06, - BAT_TT_QUERY = 0x07, - BAT_ROAM_ADV = 0x08, - BAT_UNICAST_4ADDR = 0x09 - }; - - enum bat_subtype { - BAT_P_DATA = 0x01, - BAT_P_DAT_DHT_GET = 0x02, - BAT_P_DAT_DHT_PUT = 0x03, - BAT_P_DAT_CACHE_REPLY = 0x04 + BAT_IV_OGM = 0x01, + BAT_ICMP = 0x02, + BAT_UNICAST = 0x03, + BAT_BCAST = 0x04, + BAT_VIS = 0x05, + BAT_UNICAST_FRAG = 0x06, + BAT_TT_QUERY = 0x07, + BAT_ROAM_ADV = 0x08 };
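The recv_handler_init() hunk earlier in this diff shows how these packet types index the recv_packet_handler[] dispatch table. For illustration only, a handler for one of the remaining types could be hooked in through recv_handler_register()/recv_handler_unregister() as declared in main.h; my_vis_recv, my_recv_init and my_recv_exit are hypothetical names, and in the merged tree BAT_VIS already has a handler, so this sketch only shows the call shape:

static int my_vis_recv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	/* inspect or consume the frame; returning NET_RX_DROP tells the
	 * core to free the skb instead */
	return NET_RX_SUCCESS;
}

static int __init my_recv_init(void)
{
	/* the core rejects the registration if this slot is already taken */
	return recv_handler_register(BAT_VIS, my_vis_recv);
}

static void __exit my_recv_exit(void)
{
	recv_handler_unregister(BAT_VIS);
}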
/* this file is included by batctl which needs these defines */ @@@ -168,12 -160,6 +160,6 @@@ struct unicast_packet uint8_t dest[ETH_ALEN]; } __packed;
- struct unicast_4addr_packet { - struct unicast_packet u; - uint8_t src[ETH_ALEN]; - uint8_t subtype; - } __packed; - struct unicast_frag_packet { struct batman_header header; uint8_t ttvn; /* destination translation table version number */ diff --combined net/batman-adv/routing.c index 4f2059f,840e2c6..840e2c6 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -962,18 -962,14 +962,14 @@@ int recv_unicast_packet(struct sk_buff struct unicast_packet *unicast_packet; int hdr_size = sizeof(*unicast_packet);
- unicast_packet = (struct unicast_packet *)skb->data; - - /* the caller function should have already pulled 2 bytes */ - if (unicast_packet->header.packet_type == BAT_UNICAST_4ADDR) - hdr_size = sizeof(struct unicast_4addr_packet); - if (check_unicast_packet(skb, hdr_size) < 0) return NET_RX_DROP;
if (!check_unicast_ttvn(bat_priv, skb)) return NET_RX_DROP;
+ unicast_packet = (struct unicast_packet *)skb->data; + /* packet for me */ if (is_my_mac(unicast_packet->dest)) { interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); diff --combined net/batman-adv/send.c index 815cc9c,f47299f..f47299f --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@@ -20,7 -20,6 +20,6 @@@ */
#include "main.h" - #include "distributed-arp-table.h" #include "send.h" #include "routing.h" #include "translation-table.h" @@@ -275,9 -274,6 +274,6 @@@ static void send_outstanding_bcast_pack if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) goto out;
- if (dat_drop_broadcast_packet(bat_priv, forw_packet)) - goto out; - /* rebroadcast packet */ rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &hardif_list, list) { diff --combined net/batman-adv/soft-interface.c index b56dafd,6e2530b..6e2530b --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -22,7 -22,6 +22,6 @@@ #include "main.h" #include "soft-interface.h" #include "hard-interface.h" - #include "distributed-arp-table.h" #include "routing.h" #include "send.h" #include "bat_debugfs.h" @@@ -137,7 -136,6 +136,6 @@@ static int interface_tx(struct sk_buff int data_len = skb->len, ret; short vid __maybe_unused = -1; bool do_bcast = false; - unsigned long brd_delay = 1;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto dropped; @@@ -199,9 -197,6 +197,6 @@@ if (!primary_if) goto dropped;
- if (dat_snoop_outgoing_arp_request(bat_priv, skb)) - brd_delay = msecs_to_jiffies(ARP_REQ_DELAY); - if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0) goto dropped;
@@@ -221,7 -216,7 +216,7 @@@ bcast_packet->seqno = htonl(atomic_inc_return(&bat_priv->bcast_seqno));
- add_bcast_packet_to_list(bat_priv, skb, brd_delay); + add_bcast_packet_to_list(bat_priv, skb, 1);
/* a copy is stored in the bcast list, therefore removing * the original skb. */ @@@ -235,8 -230,6 +230,6 @@@ goto dropped; }
- dat_snoop_outgoing_arp_reply(bat_priv, skb); - ret = unicast_send_skb(skb, bat_priv); if (ret != 0) goto dropped_freed; @@@ -269,12 -262,6 +262,6 @@@ void interface_rx(struct net_device *so if (!pskb_may_pull(skb, hdr_size)) goto dropped;
- if (dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size)) - goto out; - - if (dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size)) - goto out; - skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb);
@@@ -381,8 -368,6 +368,6 @@@ struct net_device *softif_create(const goto free_soft_iface; }
- arp_change_timeout(soft_iface, name); - bat_priv = netdev_priv(soft_iface);
atomic_set(&bat_priv->aggregated_ogms, 1); diff --combined net/batman-adv/types.h index 6474324,61308e8..61308e8 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@@ -27,17 -27,6 +27,6 @@@ #include "packet.h" #include "bitarray.h"
- #ifdef CONFIG_BATMAN_ADV_DAT - - /* dat_addr_t is the type used for all DHT addresses. If it is changed, - * DAT_ADDR_MAX is changed as well. - * - * *Please be careful: dat_addr_t must be UNSIGNED* - */ - #define dat_addr_t uint16_t - - #endif /* CONFIG_BATMAN_ADV_DAT */ - #define BAT_HEADER_LEN (ETH_HLEN + \ ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ sizeof(struct unicast_packet) : \ @@@ -78,9 -67,6 +67,6 @@@ struct hard_iface struct orig_node { uint8_t orig[ETH_ALEN]; uint8_t primary_addr[ETH_ALEN]; - #ifdef CONFIG_BATMAN_ADV_DAT - dat_addr_t dht_addr; - #endif struct neigh_node __rcu *router; /* rcu protected pointer */ unsigned long *bcast_own; uint8_t *bcast_own_sum; @@@ -235,9 -221,6 +221,6 @@@ struct bat_priv struct gw_node __rcu *curr_gw; /* rcu protected pointer */ atomic_t gw_reselect; struct hard_iface __rcu *primary_if; /* rcu protected pointer */ - #ifdef CONFIG_BATMAN_ADV_DAT - dat_addr_t dht_addr; - #endif struct vis_info *my_vis_info; struct bat_algo_ops *bat_algo_ops; }; @@@ -411,9 -394,4 +394,4 @@@ struct bat_algo_ops void (*bat_ogm_emit)(struct forw_packet *forw_packet); };
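For context on the bat_algo_ops structure closed just above: a routing algorithm fills it in and hands it to bat_algo_register(), after which bat_algo_select() (both declared in main.h) can pick it for a mesh, and the hard-interface code earlier in this diff invokes callbacks such as bat_iface_disable() and bat_iface_update_mac() through bat_priv->bat_algo_ops. A minimal sketch, assuming the structure carries a name field used as the lookup key; my_ogm_emit, my_bat_algo_ops and "MY_ALGO" are made up for illustration and a real implementation must also provide the interface callbacks the core checks for:

static void my_ogm_emit(struct forw_packet *forw_packet)
{
	/* hand the queued originator message to the wire; left empty here */
}

static struct bat_algo_ops my_bat_algo_ops = {
	.name		= "MY_ALGO",	/* assumed lookup key for bat_algo_select() */
	.bat_ogm_emit	= my_ogm_emit,	/* callback slot visible in the hunk above */
};

static int __init my_algo_init(void)
{
	return bat_algo_register(&my_bat_algo_ops);
}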
- struct dht_candidate { - int type; - struct orig_node *orig_node; - }; - #endif /* _NET_BATMAN_ADV_TYPES_H_ */ diff --combined net/batman-adv/unicast.c index e5c7999,74175c2..74175c2 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@@ -283,78 -283,13 +283,13 @@@ out return ret; }
- static bool pull_and_fill_unicast(struct sk_buff *skb, int hdr_size, - struct orig_node *orig_node) - { - struct unicast_packet *unicast_packet; - - if (my_skb_head_push(skb, hdr_size) < 0) - return false; - - unicast_packet = (struct unicast_packet *)skb->data; - unicast_packet->header.version = COMPAT_VERSION; - /* batman packet type: unicast */ - unicast_packet->header.packet_type = BAT_UNICAST; - /* set unicast ttl */ - unicast_packet->header.ttl = TTL; - /* copy the destination for faster routing */ - memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); - /* set the destination tt version number */ - unicast_packet->ttvn = - (uint8_t)atomic_read(&orig_node->last_ttvn); - - return true; - } - - static bool prepare_unicast_packet(struct sk_buff *skb, - struct orig_node *orig_node) - { - return pull_and_fill_unicast(skb, sizeof(struct unicast_packet), - orig_node); - } - - bool prepare_unicast_4addr_packet(struct bat_priv *bat_priv, - struct sk_buff *skb, - struct orig_node *orig_node, - int packet_subtype) - { - struct hard_iface *primary_if; - struct unicast_4addr_packet *unicast_4addr_packet; - bool ret = false; - - primary_if = primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* pull the header space and fill the unicast_packet substructure. - * We can do that because the first member of the unicast_4addr_packet - * is of type struct unicast_packet - */ - if (!pull_and_fill_unicast(skb, sizeof(*unicast_4addr_packet), - orig_node)) - goto out; - - unicast_4addr_packet = (struct unicast_4addr_packet *)skb->data; - unicast_4addr_packet->u.header.packet_type = BAT_UNICAST_4ADDR; - memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr, - ETH_ALEN); - unicast_4addr_packet->subtype = packet_subtype; - - ret = true; - out: - if (primary_if) - hardif_free_ref(primary_if); - return ret; - } - - int unicast_generic_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - int packet_type, int packet_subtype) + int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) { struct ethhdr *ethhdr = (struct ethhdr *)skb->data; + struct unicast_packet *unicast_packet; struct orig_node *orig_node; struct neigh_node *neigh_node; int data_len = skb->len; - struct unicast_packet *unicast_packet; int ret = 1;
/* get routing information */ @@@ -368,6 -303,7 +303,7 @@@ * returns NULL in case of AP isolation */ orig_node = transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest); + find_router: /** * find_router(): @@@ -375,26 -311,26 +311,26 @@@ * - increases neigh_nodes refcount if found. */ neigh_node = find_router(bat_priv, orig_node, NULL); + if (!neigh_node) goto out;
- switch (packet_type) { - case BAT_UNICAST: - prepare_unicast_packet(skb, orig_node); - break; - case BAT_UNICAST_4ADDR: - prepare_unicast_4addr_packet(bat_priv, skb, orig_node, - packet_subtype); - break; - default: - /* this function supports UNICAST and UNICAST_4ADDR only. It - * should never be invoked with any other packet type - */ + if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0) goto out;
unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet->header.version = COMPAT_VERSION; + /* batman packet type: unicast */ + unicast_packet->header.packet_type = BAT_UNICAST; + /* set unicast ttl */ + unicast_packet->header.ttl = TTL; + /* copy the destination for faster routing */ + memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); + /* set the destination tt version number */ + unicast_packet->ttvn = + (uint8_t)atomic_read(&orig_node->last_ttvn); + /* inform the destination node that we are still missing a correct route * for this client. The destination will receive this packet and will * try to reroute it because the ttvn contained in the header is less @@@ -403,9 -339,7 +339,7 @@@ if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) unicast_packet->ttvn = unicast_packet->ttvn - 1;
- /* fragmentation mechanism only works for UNICAST (now) */ - if (packet_type == BAT_UNICAST && - atomic_read(&bat_priv->fragmentation) && + if (atomic_read(&bat_priv->fragmentation) && data_len + sizeof(*unicast_packet) > neigh_node->if_incoming->net_dev->mtu) { /* send frag skb decreases ttl */ @@@ -417,6 -351,7 +351,7 @@@
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = 0; + goto out;
out: if (neigh_node) diff --combined net/batman-adv/unicast.h index e15aa62,a9faf6b..a9faf6b --- a/net/batman-adv/unicast.h +++ b/net/batman-adv/unicast.h @@@ -30,28 -30,9 +30,9 @@@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, struct sk_buff **new_skb); void frag_list_free(struct list_head *head); + int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, struct hard_iface *hard_iface, const uint8_t dstaddr[]); - bool prepare_unicast_4addr_packet(struct bat_priv *bat_priv, - struct sk_buff *skb, - struct orig_node *orig_node, - int packet_subtype); - int unicast_generic_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - int packet_type, int packet_subtype); - - static inline int unicast_send_skb(struct sk_buff *skb, - struct bat_priv *bat_priv) - { - return unicast_generic_send_skb(skb, bat_priv, BAT_UNICAST, 0); - } - - static inline int unicast_4addr_send_skb(struct sk_buff *skb, - struct bat_priv *bat_priv, - int packet_subtype) - { - return unicast_generic_send_skb(skb, bat_priv, BAT_UNICAST_4ADDR, - packet_subtype); - }
static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu) {