The following commit has been merged in the master branch:

commit b124f413323e90398b868a9848e63149d0fed8ce
Merge: 8f3dbfd79ed9ef9770305a7cc4e13dfd31ad2cd0 1a9070ec91b37234fe915849b767c61584c64a44
Author: David S. Miller <davem@davemloft.net>
Date:   Thu Mar 16 12:05:38 2017 -0700
Merge tag 'batadv-net-for-davem-20170316' of git://git.open-mesh.org/linux-merge
Simon Wunderlich says:
====================
Here are two batman-adv bugfixes:
- Keep fragments equally sized, avoids some problems with too small fragments, by Sven Eckelmann
- Initialize gateway class correctly when BATMAN V is compiled in, by Sven Eckelmann
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
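For context on the first fix: batadv_frag_send_packet() now computes the number of fragments up front and spreads the payload evenly across them, instead of filling every fragment up to the MTU and leaving a potentially tiny tail fragment. The following stand-alone C sketch models that arithmetic; the helper name and the sizes are illustrative only and are not part of the patch.

#include <stdio.h>

/* Illustrative model of the equal-sizing math from batadv_frag_send_packet():
 * split skb_len bytes into ceil(skb_len / max_frag) fragments whose sizes
 * differ by at most one byte.
 */
static void split_evenly(unsigned int skb_len, unsigned int max_frag)
{
	unsigned int num_fragments, fragment_size, chunk, i;

	if (skb_len == 0 || max_frag == 0)
		return;

	num_fragments = (skb_len - 1) / max_frag + 1;      /* ceiling division */
	fragment_size = (skb_len - 1) / num_fragments + 1; /* even share */

	for (i = 0; i < num_fragments; i++) {
		chunk = skb_len < fragment_size ? skb_len : fragment_size;
		printf("fragment %u: %u bytes\n", i, chunk);
		skb_len -= chunk;
	}
}

int main(void)
{
	split_evenly(1400, 1000);	/* two fragments of 700 bytes each */
	return 0;
}

With the old scheme the same 1400-byte payload would have been split into 1000 + 400 bytes; the two ceiling divisions above yield 700 + 700 instead, so no fragment ends up much smaller than the others.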
diff --combined net/batman-adv/bat_iv_ogm.c
index 7c3d994,7bfd0d7..71343d0
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -2477,6 -2477,16 +2477,16 @@@ static void batadv_iv_iface_activate(st
  	batadv_iv_ogm_schedule(hard_iface);
  }
+ /**
+  * batadv_iv_init_sel_class - initialize GW selection class
+  * @bat_priv: the bat priv with all the soft interface information
+  */
+ static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+ {
+ 	/* set default TQ difference threshold to 20 */
+ 	atomic_set(&bat_priv->gw.sel_class, 20);
+ }
+ 
  static struct batadv_gw_node *
  batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
  {
@@@ -2823,6 -2833,7 +2833,7 @@@ static struct batadv_algo_ops batadv_ba
  		.del_if = batadv_iv_ogm_orig_del_if,
  	},
  	.gw = {
+ 		.init_sel_class = batadv_iv_init_sel_class,
  		.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
  		.is_eligible = batadv_iv_gw_is_eligible,
  #ifdef CONFIG_BATMAN_ADV_DEBUGFS
diff --combined net/batman-adv/bat_v.c
index 0acd081,2e2471c..a36c8e7
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
   *
   * Linus Lüssing, Marek Lindner
   *
@@@ -668,6 -668,16 +668,16 @@@ err_ifinfo1
  	return ret;
  }
+ /**
+  * batadv_v_init_sel_class - initialize GW selection class
+  * @bat_priv: the bat priv with all the soft interface information
+  */
+ static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+ {
+ 	/* set default throughput difference threshold to 5Mbps */
+ 	atomic_set(&bat_priv->gw.sel_class, 50);
+ }
+ 
  static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
  					char *buff, size_t count)
  {
@@@ -1052,6 -1062,7 +1062,7 @@@ static struct batadv_algo_ops batadv_ba
  		.dump = batadv_v_orig_dump,
  	},
  	.gw = {
+ 		.init_sel_class = batadv_v_init_sel_class,
  		.store_sel_class = batadv_v_store_sel_class,
  		.show_sel_class = batadv_v_show_sel_class,
  		.get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@@ -1092,9 -1103,6 +1103,6 @@@ int batadv_v_mesh_init(struct batadv_pr
  	if (ret < 0)
  		return ret;
- 	/* set default throughput difference threshold to 5Mbps */
- 	atomic_set(&bat_priv->gw.sel_class, 50);
- 
  	return 0;
  }
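The selection class written by the two init_sel_class callbacks above is algorithm specific: BATMAN IV stores a TQ difference threshold (default 20), while BATMAN V stores a throughput difference threshold in units of 100 kbit/s (default 50, i.e. 5 Mbit/s). The fix moves both defaults out of the shared init paths (batadv_softif_init_late() and batadv_v_mesh_init(), see the hunks below) into these per-algorithm hooks, so every mesh interface starts with the default that matches its own routing algorithm. Below is a small user-space model of the resulting dispatch; all names in it are illustrative and not kernel API.

#include <stdio.h>

/* Illustrative model only: each algorithm supplies an optional init hook;
 * the generic gateway init calls it when present and otherwise falls back
 * to a neutral default of 1.
 */
struct model_priv {
	unsigned int sel_class;
	void (*init_sel_class)(struct model_priv *priv);
};

static void model_iv_init_sel_class(struct model_priv *priv)
{
	priv->sel_class = 20;		/* TQ difference threshold */
}

static void model_v_init_sel_class(struct model_priv *priv)
{
	priv->sel_class = 50;		/* 50 * 100 kbit/s = 5 Mbit/s */
}

static void model_gw_init(struct model_priv *priv)
{
	if (priv->init_sel_class)
		priv->init_sel_class(priv);
	else
		priv->sel_class = 1;
}

int main(void)
{
	struct model_priv iv = { 0, model_iv_init_sel_class };
	struct model_priv v = { 0, model_v_init_sel_class };

	model_gw_init(&iv);
	model_gw_init(&v);
	printf("BATMAN IV default: %u, BATMAN V default: %u\n",
	       iv.sel_class, v.sel_class);
	return 0;
}

The fallback value of 1 mirrors the else branch added to batadv_gw_init() in the gateway_common.c hunk further down.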
diff --combined net/batman-adv/fragmentation.c
index 11a23fd,106bda5..8f964be
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
   *
   * Martin Hundebøll <martin@hundeboll.net>
   *
@@@ -404,7 -404,7 +404,7 @@@ out
   * batadv_frag_create - create a fragment from skb
   * @skb: skb to create fragment from
   * @frag_head: header to use in new fragment
-  * @mtu: size of new fragment
+  * @fragment_size: size of new fragment
   *
   * Split the passed skb into two fragments: A new one with size matching the
   * passed mtu and the old one with the rest. The new skb contains data from the
@@@ -414,11 -414,11 +414,11 @@@
   */
  static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
  					  struct batadv_frag_packet *frag_head,
- 					  unsigned int mtu)
+ 					  unsigned int fragment_size)
  {
  	struct sk_buff *skb_fragment;
  	unsigned int header_size = sizeof(*frag_head);
- 	unsigned int fragment_size = mtu - header_size;
+ 	unsigned int mtu = fragment_size + header_size;
  
  	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
  	if (!skb_fragment)
@@@ -456,7 -456,7 +456,7 @@@ int batadv_frag_send_packet(struct sk_b
  	struct sk_buff *skb_fragment;
  	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
  	unsigned int header_size = sizeof(frag_header);
- 	unsigned int max_fragment_size, max_packet_size;
+ 	unsigned int max_fragment_size, num_fragments;
  	int ret;
  
  	/* To avoid merge and refragmentation at next-hops we never send
@@@ -464,10 -464,15 +464,15 @@@
  	 */
  	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
  	max_fragment_size = mtu - header_size;
- 	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+ 
+ 	if (skb->len == 0 || max_fragment_size == 0)
+ 		return -EINVAL;
+ 
+ 	num_fragments = (skb->len - 1) / max_fragment_size + 1;
+ 	max_fragment_size = (skb->len - 1) / num_fragments + 1;
  
  	/* Don't even try to fragment, if we need more than 16 fragments */
- 	if (skb->len > max_packet_size) {
+ 	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
  		ret = -EAGAIN;
  		goto free_skb;
  	}
@@@ -507,7 -512,8 +512,8 @@@
  		goto put_primary_if;
  	}
- 	skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+ 	skb_fragment = batadv_frag_create(skb, &frag_header,
+ 					  max_fragment_size);
  	if (!skb_fragment) {
  		ret = -ENOMEM;
  		goto put_primary_if;
diff --combined net/batman-adv/gateway_common.c
index 5db2e43,3e3f91a..33940c5
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
   *
   * Marek Lindner
   *
@@@ -253,6 -253,11 +253,11 @@@ static void batadv_gw_tvlv_ogm_handler_
   */
  void batadv_gw_init(struct batadv_priv *bat_priv)
  {
+ 	if (bat_priv->algo_ops->gw.init_sel_class)
+ 		bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+ 	else
+ 		atomic_set(&bat_priv->gw.sel_class, 1);
+ 
  	batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
  				     NULL, BATADV_TVLV_GW, 1,
  				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
diff --combined net/batman-adv/soft-interface.c
index 5d099b2,2e0b346..d042c99
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -258,8 -258,7 +258,8 @@@ static int batadv_interface_tx(struct s
  	ethhdr = eth_hdr(skb);
  
  	/* Register the client MAC in the transtable */
 -	if (!is_multicast_ether_addr(ethhdr->h_source)) {
 +	if (!is_multicast_ether_addr(ethhdr->h_source) &&
 +	    !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
  		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
  						   vid, skb->skb_iif,
  						   skb->mark);
@@@ -482,6 -481,8 +482,6 @@@ void batadv_interface_rx(struct net_dev
  	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
  			   skb->len + ETH_HLEN);
 -	soft_iface->last_rx = jiffies;
 -
  	/* Let the bridge loop avoidance check the packet. If will
  	 * not handle it, we can safely push it up.
  	 */
@@@ -819,7 -820,6 +819,6 @@@ static int batadv_softif_init_late(stru
  	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
  #endif
  	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
- 	atomic_set(&bat_priv->gw.sel_class, 20);
  	atomic_set(&bat_priv->gw.bandwidth_down, 100);
  	atomic_set(&bat_priv->gw.bandwidth_up, 20);
  	atomic_set(&bat_priv->orig_interval, 1000);
diff --combined net/batman-adv/types.h
index 66b25e4,5137d85..246f21b
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -402,7 -402,7 +402,7 @@@ struct batadv_gw_node
  	struct rcu_head rcu;
  };
 -DECLARE_EWMA(throughput, 1024, 8)
 +DECLARE_EWMA(throughput, 10, 8)
  
  /**
   * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
@@@ -1489,6 -1489,7 +1489,7 @@@ struct batadv_algo_orig_ops
  /**
   * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+  * @init_sel_class: initialize GW selection class (optional)
   * @store_sel_class: parse and stores a new GW selection class (optional)
   * @show_sel_class: prints the current GW selection class (optional)
   * @get_best_gw_node: select the best GW from the list of available nodes
@@@ -1499,6 -1500,7 +1500,7 @@@
   * @dump: dump gateways to a netlink socket (optional)
   */
  struct batadv_algo_gw_ops {
+ 	void (*init_sel_class)(struct batadv_priv *bat_priv);
  	ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
  				   size_t count);
  	ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);