Hi everyone,
here's the third iteration of the multicast patches, which mainly consists of bug fixes plus some smaller and a few slightly larger restructurings. Thanks again for the feedback!
Cheers, Linus
= Changelog v3 =
* rebasing to commit [8bb3d9422707a7814ac804acbc00569dd017288c] * 4B-aligning for 'struct mcast_entry', adding __packed attribute [01/19] * converted mcast_forw_table and subsequent lists to hlists [10/19], [11/19], [12/19], [13/19] * split mcast_forw_table_seq_print_text into several functions [11/19] * shorter rcu-locking when printing to sysfs, only in seq_print_if_entry [11/19] * add missing spin_lock initialization [10/19] * fix logic for break_flag in tracker_packet_for_each() macro [07/19] * compat.h, should be "#define MC_LIST struct netdev_hw_addr" instead [03/19] * adding mcast duplicate check seqno lock [17/19] * fixed rcu-locking in route_mcast_tracker_packet(), in fact, using refcounting there now [07/19] * restructuring route_mcast_tracker_packet(), merging two loops * also delete dest from tracker packet if unknown originator in zero_tracker_packet() [07/19] * compactify check-code in zero_tracker_packet() [07/19] * don't use index i like nexthop_tracker_packets[i] in skb sending loop in route_mcast_tracker_packet(), it has the wrong base [07/19] * use hash_find() instead of own orig_node look-up in zero_tracker_packet() and add_router_of_dest() [07/19] * allocate dest_entries on heap instead of stack, >4k is too much for the stack [07/19] * rename dest_entries_list to dest_entries_buckets, trying to clarify its purpose [07/19] * add comment for find_mca_match() [07/19] * move comments for zero_tracker_packet() and shrink_tracker_packet() to the correct patches [10/19] * remove wrong comment for sync_orig() (duplicate of sync_table()) [10/19] * remove variable name defines for net_device multicast lists [03/19] * directly create & use SKBs for tracker_packets [07/19], [09/19] * moving stuff from mcast_proact_tracker_prepare() to build_tracker_packet_skb() [07/19] * only create methods / variables / defines / macros in patches that need them (this changed the BAT_MCAST/MCAST_TRACKER define numbers) -[01/19], -[03/19] * use rcu-locking for 
mcast_forw_table, +[18]
This commit adds the needed configurable variables in bat_priv and the corresponding user interfaces in sysfs for the future multicast optimizations.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- Makefile.kbuild | 1 + bat_sysfs.c | 160 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ multicast.c | 121 +++++++++++++++++++++++++++++++++++++++++ multicast.h | 30 ++++++++++ packet.h | 4 ++ soft-interface.c | 4 ++ types.h | 4 ++ 7 files changed, 324 insertions(+), 0 deletions(-) create mode 100644 multicast.c create mode 100644 multicast.h
diff --git a/batman-adv/Makefile.kbuild b/batman-adv/Makefile.kbuild index 0a35006..1272d0d 100644 --- a/batman-adv/Makefile.kbuild +++ b/batman-adv/Makefile.kbuild @@ -49,5 +49,6 @@ batman-adv-y += send.o batman-adv-y += soft-interface.o batman-adv-y += translation-table.o batman-adv-y += unicast.o +batman-adv-y += multicast.o batman-adv-y += vis.o batman-adv-y += bat_printk.o diff --git a/batman-adv/bat_sysfs.c b/batman-adv/bat_sysfs.c index f7b93a0..67adb35 100644 --- a/batman-adv/bat_sysfs.c +++ b/batman-adv/bat_sysfs.c @@ -27,6 +27,7 @@ #include "gateway_common.h" #include "gateway_client.h" #include "vis.h" +#include "multicast.h"
#define to_dev(obj) container_of(obj, struct device, kobj) #define kobj_to_netdev(obj) to_net_dev(to_dev(obj->parent)) @@ -356,6 +357,153 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, return gw_bandwidth_set(net_dev, buff, count); }
+static ssize_t show_mcast_mode(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct device *dev = to_dev(kobj->parent); + struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev)); + int mcast_mode = atomic_read(&bat_priv->mcast_mode); + int ret; + + switch (mcast_mode) { + case MCAST_MODE_CLASSIC_FLOODING: + ret = sprintf(buff, "classic_flooding\n"); + break; + case MCAST_MODE_PROACT_TRACKING: + ret = sprintf(buff, "proactive_tracking\n"); + break; + default: + ret = -1; + break; + } + + return ret; +} + +static ssize_t store_mcast_mode(struct kobject *kobj, struct attribute *attr, + char *buff, size_t count) +{ + struct device *dev = to_dev(kobj->parent); + struct net_device *net_dev = to_net_dev(dev); + struct bat_priv *bat_priv = netdev_priv(net_dev); + unsigned long val; + int ret, mcast_mode_tmp = -1; + + ret = strict_strtoul(buff, 10, &val); + + if (((count == 2) && (!ret) && (val == MCAST_MODE_CLASSIC_FLOODING)) || + (strncmp(buff, "classic_flooding", 16) == 0)) + mcast_mode_tmp = MCAST_MODE_CLASSIC_FLOODING; + + if (((count == 2) && (!ret) && (val == MCAST_MODE_PROACT_TRACKING)) || + (strncmp(buff, "proact_tracking", 15) == 0)) + mcast_mode_tmp = MCAST_MODE_PROACT_TRACKING; + + if (mcast_mode_tmp < 0) { + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + bat_info(net_dev, + "Invalid parameter for 'mcast mode' setting received: " + "%s\n", buff); + return -EINVAL; + } + + if (atomic_read(&bat_priv->mcast_mode) == mcast_mode_tmp) + return count; + + bat_info(net_dev, "Changing mcast mode from: %s to: %s\n", + atomic_read(&bat_priv->mcast_mode) == + MCAST_MODE_CLASSIC_FLOODING ? + "classic_flooding" : "proact_tracking", + mcast_mode_tmp == MCAST_MODE_CLASSIC_FLOODING ? 
+ "classic_flooding" : "proact_tracking"); + + atomic_set(&bat_priv->mcast_mode, (unsigned)mcast_mode_tmp); + return count; +} + +static ssize_t show_mcast_tracker_interval(struct kobject *kobj, + struct attribute *attr, char *buff) +{ + struct device *dev = to_dev(kobj->parent); + struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev)); + int tracker_interval = atomic_read(&bat_priv->mcast_tracker_interval); + + if (!tracker_interval) + return sprintf(buff, "auto\n"); + else + return sprintf(buff, "%i\n", tracker_interval); +} + +static ssize_t store_mcast_tracker_interval(struct kobject *kobj, + struct attribute *attr, char *buff, size_t count) +{ + struct device *dev = to_dev(kobj->parent); + struct net_device *net_dev = to_net_dev(dev); + + return mcast_tracker_interval_set(net_dev, buff, count); +} + +static ssize_t show_mcast_tracker_timeout(struct kobject *kobj, + struct attribute *attr, char *buff) +{ + struct device *dev = to_dev(kobj->parent); + struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev)); + int tracker_timeout = atomic_read(&bat_priv->mcast_tracker_timeout); + + if (!tracker_timeout) + return sprintf(buff, "auto\n"); + else + return sprintf(buff, "%i\n", tracker_timeout); +} + +static ssize_t store_mcast_tracker_timeout(struct kobject *kobj, + struct attribute *attr, char *buff, size_t count) +{ + struct device *dev = to_dev(kobj->parent); + struct net_device *net_dev = to_net_dev(dev); + + return mcast_tracker_timeout_set(net_dev, buff, count); +} + +static ssize_t show_mcast_fanout(struct kobject *kobj, + struct attribute *attr, char *buff) +{ + struct device *dev = to_dev(kobj->parent); + struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev)); + + return sprintf(buff, "%i\n", + atomic_read(&bat_priv->mcast_fanout)); +} + +static ssize_t store_mcast_fanout(struct kobject *kobj, + struct attribute *attr, char *buff, size_t count) +{ + struct device *dev = to_dev(kobj->parent); + struct net_device *net_dev = to_net_dev(dev); + struct 
bat_priv *bat_priv = netdev_priv(net_dev); + unsigned long mcast_fanout_tmp; + int ret; + + ret = strict_strtoul(buff, 10, &mcast_fanout_tmp); + if (ret) { + bat_info(net_dev, "Invalid parameter for 'mcast_fanout' " + "setting received: %s\n", buff); + return -EINVAL; + } + + if (atomic_read(&bat_priv->mcast_fanout) == mcast_fanout_tmp) + return count; + + bat_info(net_dev, "Changing mcast fanout interval from: %i to: %li\n", + atomic_read(&bat_priv->mcast_fanout), + mcast_fanout_tmp); + + atomic_set(&bat_priv->mcast_fanout, mcast_fanout_tmp); + return count; +} + BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); @@ -367,6 +515,14 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, post_gw_deselect); static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, store_gw_bwidth); +static BAT_ATTR(mcast_mode, S_IRUGO | S_IWUSR, + show_mcast_mode, store_mcast_mode); +static BAT_ATTR(mcast_tracker_interval, S_IRUGO | S_IWUSR, + show_mcast_tracker_interval, store_mcast_tracker_interval); +static BAT_ATTR(mcast_tracker_timeout, S_IRUGO | S_IWUSR, + show_mcast_tracker_timeout, store_mcast_tracker_timeout); +static BAT_ATTR(mcast_fanout, S_IRUGO | S_IWUSR, + show_mcast_fanout, store_mcast_fanout); #ifdef CONFIG_BATMAN_ADV_DEBUG BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL); #endif @@ -381,6 +537,10 @@ static struct bat_attribute *mesh_attrs[] = { &bat_attr_hop_penalty, &bat_attr_gw_sel_class, &bat_attr_gw_bandwidth, + &bat_attr_mcast_mode, + &bat_attr_mcast_tracker_interval, + &bat_attr_mcast_tracker_timeout, + &bat_attr_mcast_fanout, #ifdef CONFIG_BATMAN_ADV_DEBUG &bat_attr_log_level, #endif diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c new file mode 100644 index 0000000..0598873 --- /dev/null +++ b/batman-adv/multicast.c @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2010 B.A.T.M.A.N. 
contributors: + * + * Linus Lüssing + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "multicast.h" + +int mcast_tracker_interval_set(struct net_device *net_dev, char *buff, + size_t count) +{ + struct bat_priv *bat_priv = netdev_priv(net_dev); + unsigned long new_tracker_interval; + int cur_tracker_interval; + int ret; + + ret = strict_strtoul(buff, 10, &new_tracker_interval); + + if (ret && !strncmp(buff, "auto", 4)) { + new_tracker_interval = 0; + goto ok; + } + + else if (ret) { + bat_info(net_dev, "Invalid parameter for " + "'mcast_tracker_interval' setting received: %s\n", + buff); + return -EINVAL; + } + + if (new_tracker_interval < JITTER) { + bat_info(net_dev, "New mcast tracker interval too small: %li " + "(min: %i or auto)\n", new_tracker_interval, JITTER); + return -EINVAL; + } + +ok: + cur_tracker_interval = atomic_read(&bat_priv->mcast_tracker_interval); + + if (cur_tracker_interval == new_tracker_interval) + return count; + + if (!cur_tracker_interval && new_tracker_interval) + bat_info(net_dev, "Tracker interval change from: %s to: %li\n", + "auto", new_tracker_interval); + else if (cur_tracker_interval && !new_tracker_interval) + bat_info(net_dev, "Tracker interval change from: %i to: %s\n", + cur_tracker_interval, "auto"); + else + bat_info(net_dev, "Tracker interval change from: %i to: %li\n", + 
cur_tracker_interval, new_tracker_interval); + + atomic_set(&bat_priv->mcast_tracker_interval, new_tracker_interval); + + return count; +} + +int mcast_tracker_timeout_set(struct net_device *net_dev, char *buff, + size_t count) +{ + struct bat_priv *bat_priv = netdev_priv(net_dev); + unsigned long new_tracker_timeout; + int cur_tracker_timeout; + int ret; + + ret = strict_strtoul(buff, 10, &new_tracker_timeout); + + if (ret && !strncmp(buff, "auto", 4)) { + new_tracker_timeout = 0; + goto ok; + } + + else if (ret) { + bat_info(net_dev, "Invalid parameter for " + "'mcast_tracker_timeout' setting received: %s\n", + buff); + return -EINVAL; + } + + if (new_tracker_timeout < JITTER) { + bat_info(net_dev, "New mcast tracker timeout too small: %li " + "(min: %i or auto)\n", new_tracker_timeout, JITTER); + return -EINVAL; + } + +ok: + cur_tracker_timeout = atomic_read(&bat_priv->mcast_tracker_timeout); + + if (cur_tracker_timeout == new_tracker_timeout) + return count; + + if (!cur_tracker_timeout && new_tracker_timeout) + bat_info(net_dev, "Tracker timeout change from: %s to: %li\n", + "auto", new_tracker_timeout); + else if (cur_tracker_timeout && !new_tracker_timeout) + bat_info(net_dev, "Tracker timeout change from: %i to: %s\n", + cur_tracker_timeout, "auto"); + else + bat_info(net_dev, "Tracker timeout change from: %i to: %li\n", + cur_tracker_timeout, new_tracker_timeout); + + atomic_set(&bat_priv->mcast_tracker_timeout, new_tracker_timeout); + + return count; +} diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h new file mode 100644 index 0000000..12a3376 --- /dev/null +++ b/batman-adv/multicast.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2010 B.A.T.M.A.N. contributors: + * + * Linus Lüssing + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_MULTICAST_H_ +#define _NET_BATMAN_ADV_MULTICAST_H_ + +int mcast_tracker_interval_set(struct net_device *net_dev, char *buff, + size_t count); +int mcast_tracker_timeout_set(struct net_device *net_dev, char *buff, + size_t count); + +#endif /* _NET_BATMAN_ADV_MULTICAST_H_ */ diff --git a/batman-adv/packet.h b/batman-adv/packet.h index e757187..eef5371 100644 --- a/batman-adv/packet.h +++ b/batman-adv/packet.h @@ -48,6 +48,10 @@ #define VIS_TYPE_SERVER_SYNC 0 #define VIS_TYPE_CLIENT_UPDATE 1
+/* mcast defines */ +#define MCAST_MODE_CLASSIC_FLOODING 0 +#define MCAST_MODE_PROACT_TRACKING 1 + /* fragmentation defines */ #define UNI_FRAG_HEAD 0x01 #define UNI_FRAG_LARGETAIL 0x02 diff --git a/batman-adv/soft-interface.c b/batman-adv/soft-interface.c index 145e0f7..f25fe9d 100644 --- a/batman-adv/soft-interface.c +++ b/batman-adv/soft-interface.c @@ -597,6 +597,10 @@ struct net_device *softif_create(char *name) atomic_set(&bat_priv->gw_bandwidth, 41); atomic_set(&bat_priv->orig_interval, 1000); atomic_set(&bat_priv->hop_penalty, 10); + atomic_set(&bat_priv->mcast_mode, MCAST_MODE_CLASSIC_FLOODING); + atomic_set(&bat_priv->mcast_tracker_interval, 0); /* = auto */ + atomic_set(&bat_priv->mcast_tracker_timeout, 0); /* = auto */ + atomic_set(&bat_priv->mcast_fanout, 2); atomic_set(&bat_priv->log_level, 0); atomic_set(&bat_priv->fragmentation, 1); atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); diff --git a/batman-adv/types.h b/batman-adv/types.h index e4a0462..47490fa 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -139,6 +139,10 @@ struct bat_priv { atomic_t gw_bandwidth; /* gw bandwidth */ atomic_t orig_interval; /* uint */ atomic_t hop_penalty; /* uint */ + atomic_t mcast_mode; /* MCAST_MODE_* */ + atomic_t mcast_tracker_interval;/* uint, auto */ + atomic_t mcast_tracker_timeout; /* uint, auto */ + atomic_t mcast_fanout; /* uint */ atomic_t log_level; /* uint */ atomic_t bcast_seqno; atomic_t bcast_queue_left;
This patch introduces multicast announcements - MCA for short - which are now being attached to an OGM if an optimized multicast mode needing MCAs has been selected (i.e. proactive_tracking).
MCA entries are multicast MAC addresses used by a multicast receiver in the mesh cloud. Currently MCAs are only fetched locally from the corresponding batman interface itself; bridged-in hosts will not yet get announced and will need a more complex patch for supporting IGMP/MLD snooping. However, the local fetching also makes it possible to have multicast optimizations on layer 2 already for batman nodes, not depending on IP at all.
This patch increases the COMPAT_VERSION.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- aggregation.c | 12 +++++++- aggregation.h | 6 +++- compat.h | 44 +++++++++++++++++++++++++++++ hard-interface.c | 1 + main.h | 2 + packet.h | 4 +- send.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++------- 7 files changed, 132 insertions(+), 17 deletions(-)
diff --git a/batman-adv/aggregation.c b/batman-adv/aggregation.c index 1997725..0251d17 100644 --- a/batman-adv/aggregation.c +++ b/batman-adv/aggregation.c @@ -30,6 +30,12 @@ static int hna_len(struct batman_packet *batman_packet) return batman_packet->num_hna * ETH_ALEN; }
+/* calculate the size of the mca information for a given packet */ +static int mca_len(struct batman_packet *batman_packet) +{ + return batman_packet->num_mca * ETH_ALEN; +} + /* return true if new_packet can be aggregated with forw_packet */ static bool can_aggregate_with(struct batman_packet *new_batman_packet, int packet_len, @@ -265,9 +271,11 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, hna_buff, hna_len(batman_packet), if_incoming);
- buff_pos += BAT_PACKET_LEN + hna_len(batman_packet); + buff_pos += BAT_PACKET_LEN + hna_len(batman_packet) + + mca_len(batman_packet); batman_packet = (struct batman_packet *) (packet_buff + buff_pos); } while (aggregated_packet(buff_pos, packet_len, - batman_packet->num_hna)); + batman_packet->num_hna, + batman_packet->num_mca)); } diff --git a/batman-adv/aggregation.h b/batman-adv/aggregation.h index 6ce305b..a4551f8 100644 --- a/batman-adv/aggregation.h +++ b/batman-adv/aggregation.h @@ -25,9 +25,11 @@ #include "main.h"
/* is there another aggregated packet here? */ -static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna) +static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna, + int num_mca) { - int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN); + int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN) + + (num_mca * ETH_ALEN);
return (next_buff_pos <= packet_len) && (next_buff_pos <= MAX_AGGREGATION_BYTES); diff --git a/batman-adv/compat.h b/batman-adv/compat.h index a76d0be..e7b19cd 100644 --- a/batman-adv/compat.h +++ b/batman-adv/compat.h @@ -270,4 +270,48 @@ int bat_seq_printf(struct seq_file *m, const char *f, ...);
#endif /* < KERNEL_VERSION(2, 6, 33) */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) + +#define netdev_mc_count(dev) ((dev)->mc_count) +#undef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = (struct bat_dev_addr_list*)dev->mc_list; mclist; \ + mclist = (struct bat_dev_addr_list*)mclist->next) + +#endif /* < KERNEL_VERSION(2, 6, 35) */ + + +/* + * net_device - multicast list handling + * structures + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) + +/* Note, that this breaks the usage of the normal 'struct netdev_hw_addr' + * for kernels < 2.6.35 in batman-adv! */ +#define netdev_hw_addr bat_dev_addr_list +struct bat_dev_addr_list { + struct dev_addr_list *next; + u8 addr[MAX_ADDR_LEN]; + u8 da_addrlen; + u8 da_synced; + int da_users; + int da_gusers; +}; + +#endif /* < KERNEL_VERSION(2, 6, 35) */ + +/* + * net_device - multicast list handling + * locking + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) + +#define netif_addr_lock_bh(soft_iface) \ + netif_tx_lock_bh(soft_iface) +#define netif_addr_unlock_bh(soft_iface) \ + netif_tx_unlock_bh(soft_iface) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) */ + #endif /* _NET_BATMAN_ADV_COMPAT_H_ */ diff --git a/batman-adv/hard-interface.c b/batman-adv/hard-interface.c index e2b001a..1455e7f 100644 --- a/batman-adv/hard-interface.c +++ b/batman-adv/hard-interface.c @@ -319,6 +319,7 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name) batman_packet->ttl = 2; batman_packet->tq = TQ_MAX_VALUE; batman_packet->num_hna = 0; + batman_packet->num_mca = 0;
batman_if->if_num = bat_priv->num_ifaces; bat_priv->num_ifaces++; diff --git a/batman-adv/main.h b/batman-adv/main.h index 1ec9986..64c1793 100644 --- a/batman-adv/main.h +++ b/batman-adv/main.h @@ -97,6 +97,8 @@ * Vis */
+#define UINT8_MAX 255 + /* * Kernel headers */ diff --git a/batman-adv/packet.h b/batman-adv/packet.h index eef5371..d77af90 100644 --- a/batman-adv/packet.h +++ b/batman-adv/packet.h @@ -32,7 +32,7 @@ #define BAT_UNICAST_FRAG 0x06
/* this file is included by batctl which needs these defines */ -#define COMPAT_VERSION 12 +#define COMPAT_VERSION 14 #define DIRECTLINK 0x40 #define VIS_SERVER 0x20 #define PRIMARIES_FIRST_HOP 0x10 @@ -66,8 +66,8 @@ struct batman_packet { uint8_t prev_sender[6]; uint8_t ttl; uint8_t num_hna; + uint8_t num_mca; uint8_t gw_flags; /* flags related to gateway class */ - uint8_t align; } __packed;
#define BAT_PACKET_LEN sizeof(struct batman_packet) diff --git a/batman-adv/send.c b/batman-adv/send.c index 7cc620e..0d4a2a6 100644 --- a/batman-adv/send.c +++ b/batman-adv/send.c @@ -122,7 +122,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet, /* adjust all flags and log packets */ while (aggregated_packet(buff_pos, forw_packet->packet_len, - batman_packet->num_hna)) { + batman_packet->num_hna, + batman_packet->num_mca)) {
/* we might have aggregated direct link packets with an * ordinary base packet */ @@ -214,18 +215,69 @@ static void send_packet(struct forw_packet *forw_packet) rcu_read_unlock(); }
+static void add_own_MCA(struct batman_packet *batman_packet, int num_mca, + struct net_device *soft_iface) +{ + struct netdev_hw_addr *mc_list_entry; + int num_mca_done = 0; + char *mca_entry = (char *)(batman_packet + 1); + + if (num_mca == 0) + goto out; + + if (num_mca > UINT8_MAX) { + pr_warning("Too many multicast announcements here, " + "just adding %i\n", UINT8_MAX); + num_mca = UINT8_MAX; + } + + mca_entry = mca_entry + batman_packet->num_hna * ETH_ALEN; + + netif_addr_lock_bh(soft_iface); + netdev_for_each_mc_addr(mc_list_entry, soft_iface) { + memcpy(mca_entry, &mc_list_entry->addr, ETH_ALEN); + mca_entry += ETH_ALEN; + + /* A multicast address might just have been added, + * avoid writing outside of buffer */ + if (++num_mca_done == num_mca) + break; + } + netif_addr_unlock_bh(soft_iface); + +out: + batman_packet->num_mca = num_mca_done; +} + static void rebuild_batman_packet(struct bat_priv *bat_priv, struct batman_if *batman_if) { - int new_len; - unsigned char *new_buff; + int new_len, mcast_mode, num_mca = 0; + unsigned char *new_buff = NULL; struct batman_packet *batman_packet;
- new_len = sizeof(struct batman_packet) + - (bat_priv->num_local_hna * ETH_ALEN); - new_buff = kmalloc(new_len, GFP_ATOMIC); + batman_packet = (struct batman_packet *)batman_if->packet_buff; + mcast_mode = atomic_read(&bat_priv->mcast_mode);
- /* keep old buffer if kmalloc should fail */ + /* Avoid attaching MCAs, if multicast optimization is disabled */ + if (mcast_mode == MCAST_MODE_PROACT_TRACKING) { + netif_addr_lock_bh(batman_if->soft_iface); + num_mca = netdev_mc_count(batman_if->soft_iface); + netif_addr_unlock_bh(batman_if->soft_iface); + } + + if (atomic_read(&bat_priv->hna_local_changed) || + num_mca != batman_packet->num_mca) { + new_len = sizeof(struct batman_packet) + + (bat_priv->num_local_hna * ETH_ALEN) + + num_mca * ETH_ALEN; + new_buff = kmalloc(new_len, GFP_ATOMIC); + } + + /* + * if local hna or mca has changed but kmalloc failed + * then just keep the old buffer + */ if (new_buff) { memcpy(new_buff, batman_if->packet_buff, sizeof(struct batman_packet)); @@ -239,6 +291,13 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, batman_if->packet_buff = new_buff; batman_if->packet_len = new_len; } + + /** + * always copy mca entries (if there are any) - we have to + * traverse the list anyway, so we can just do a memcpy instead of + * memcmp for the sake of simplicity + */ + add_own_MCA(batman_packet, num_mca, batman_if->soft_iface); }
void schedule_own_packet(struct batman_if *batman_if) @@ -264,9 +323,7 @@ void schedule_own_packet(struct batman_if *batman_if) if (batman_if->if_status == IF_TO_BE_ACTIVATED) batman_if->if_status = IF_ACTIVE;
- /* if local hna has changed and interface is a primary interface */ - if ((atomic_read(&bat_priv->hna_local_changed)) && - (batman_if == bat_priv->primary_if)) + if (batman_if == bat_priv->primary_if) rebuild_batman_packet(bat_priv, batman_if);
/** @@ -359,7 +416,8 @@ void schedule_forward_packet(struct orig_node *orig_node, send_time = forward_send_time(); add_bat_packet_to_list(bat_priv, (unsigned char *)batman_packet, - sizeof(struct batman_packet) + hna_buff_len, + sizeof(struct batman_packet) + hna_buff_len + + batman_packet->num_mca * ETH_ALEN, if_incoming, 0, send_time); }
This commit adds a timer for sending periodic tracker packets (the sending itself is not within the scope of this patch). Furthermore, the timer gets restarted if the tracker interval gets changed, or if the originator interval gets changed while auto mode is selected for the tracker interval.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- bat_sysfs.c | 13 +++++++++++-- main.c | 5 +++++ multicast.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ multicast.h | 3 +++ types.h | 1 + 5 files changed, 77 insertions(+), 2 deletions(-)
diff --git a/batman-adv/bat_sysfs.c b/batman-adv/bat_sysfs.c index 67adb35..f6e918f 100644 --- a/batman-adv/bat_sysfs.c +++ b/batman-adv/bat_sysfs.c @@ -357,8 +357,16 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, return gw_bandwidth_set(net_dev, buff, count); }
+void update_mcast_tracker(struct net_device *net_dev) +{ + struct bat_priv *bat_priv = netdev_priv(net_dev); + + if (!atomic_read(&bat_priv->mcast_tracker_interval)) + mcast_tracker_reset(bat_priv); +} + static ssize_t show_mcast_mode(struct kobject *kobj, struct attribute *attr, - char *buff) + char *buff) { struct device *dev = to_dev(kobj->parent); struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev)); @@ -509,7 +517,8 @@ BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); -BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); +BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, + update_mcast_tracker); BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, post_gw_deselect); diff --git a/batman-adv/main.c b/batman-adv/main.c index e687e7f..4233c7b 100644 --- a/batman-adv/main.c +++ b/batman-adv/main.c @@ -32,6 +32,7 @@ #include "gateway_client.h" #include "types.h" #include "vis.h" +#include "multicast.h" #include "hash.h"
struct list_head if_list; @@ -108,6 +109,9 @@ int mesh_init(struct net_device *soft_iface) if (vis_init(bat_priv) < 1) goto err;
+ if (mcast_init(bat_priv) < 1) + goto err; + atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); goto end;
@@ -138,6 +142,7 @@ void mesh_free(struct net_device *soft_iface) hna_global_free(bat_priv);
softif_neigh_purge(bat_priv); + mcast_free(bat_priv);
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); } diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index 0598873..cc83937 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -22,6 +22,48 @@ #include "main.h" #include "multicast.h"
+/* how long to wait until sending a multicast tracker packet */ +static int tracker_send_delay(struct bat_priv *bat_priv) +{ + int tracker_interval = atomic_read(&bat_priv->mcast_tracker_interval); + + /* auto mode, set to 1/2 ogm interval */ + if (!tracker_interval) + tracker_interval = atomic_read(&bat_priv->orig_interval) / 2; + + /* multicast tracker packets get half as much jitter as ogms as they're + * limited down to JITTER and not JITTER*2 */ + return msecs_to_jiffies(tracker_interval - + JITTER/2 + (random32() % JITTER)); +} + +static void start_mcast_tracker(struct bat_priv *bat_priv) +{ + /* adding some jitter */ + unsigned long tracker_interval = tracker_send_delay(bat_priv); + queue_delayed_work(bat_event_workqueue, &bat_priv->mcast_tracker_work, + tracker_interval); +} + +static void stop_mcast_tracker(struct bat_priv *bat_priv) +{ + cancel_delayed_work_sync(&bat_priv->mcast_tracker_work); +} + +void mcast_tracker_reset(struct bat_priv *bat_priv) +{ + stop_mcast_tracker(bat_priv); + start_mcast_tracker(bat_priv); +} + +static void mcast_tracker_timer(struct work_struct *work) +{ + struct bat_priv *bat_priv = container_of(work, struct bat_priv, + mcast_tracker_work.work); + + start_mcast_tracker(bat_priv); +} + int mcast_tracker_interval_set(struct net_device *net_dev, char *buff, size_t count) { @@ -68,6 +110,8 @@ ok:
atomic_set(&bat_priv->mcast_tracker_interval, new_tracker_interval);
+ mcast_tracker_reset(bat_priv); + return count; }
@@ -119,3 +163,16 @@ ok:
return count; } + +int mcast_init(struct bat_priv *bat_priv) +{ + INIT_DELAYED_WORK(&bat_priv->mcast_tracker_work, mcast_tracker_timer); + start_mcast_tracker(bat_priv); + + return 1; +} + +void mcast_free(struct bat_priv *bat_priv) +{ + stop_mcast_tracker(bat_priv); +} diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h index 12a3376..26ce6d8 100644 --- a/batman-adv/multicast.h +++ b/batman-adv/multicast.h @@ -26,5 +26,8 @@ int mcast_tracker_interval_set(struct net_device *net_dev, char *buff, size_t count); int mcast_tracker_timeout_set(struct net_device *net_dev, char *buff, size_t count); +void mcast_tracker_reset(struct bat_priv *bat_priv); +int mcast_init(struct bat_priv *bat_priv); +void mcast_free(struct bat_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_MULTICAST_H_ */ diff --git a/batman-adv/types.h b/batman-adv/types.h index 47490fa..6bd74c1 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -175,6 +175,7 @@ struct bat_priv { struct delayed_work hna_work; struct delayed_work orig_work; struct delayed_work vis_work; + struct delayed_work mcast_tracker_work; struct gw_node *curr_gw; struct vis_info *my_vis_info; };
We need to store the MCA information attached to the OGMs to be able to prepare the tracker packets with it later.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- originator.c | 7 ++++++- routing.c | 40 +++++++++++++++++++++++++++++++++++++--- routing.h | 2 +- types.h | 2 ++ 4 files changed, 46 insertions(+), 5 deletions(-)
diff --git a/batman-adv/originator.c b/batman-adv/originator.c index e8a8473..637d5f1 100644 --- a/batman-adv/originator.c +++ b/batman-adv/originator.c @@ -139,6 +139,8 @@ void orig_node_free_ref(struct kref *refcount) hna_global_del_orig(orig_node->bat_priv, orig_node, "originator timed out");
+ kfree(orig_node->mca_buff); + kfree(orig_node->bcast_own); kfree(orig_node->bcast_own_sum); kfree(orig_node); @@ -228,6 +230,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) memcpy(orig_node->orig, addr, ETH_ALEN); orig_node->router = NULL; orig_node->hna_buff = NULL; + orig_node->mca_buff = NULL; + orig_node->num_mca = 0; orig_node->bcast_seqno_reset = jiffies - 1 - msecs_to_jiffies(RESET_PROTECTION_MS); orig_node->batman_seqno_reset = jiffies - 1 @@ -341,7 +345,8 @@ static bool purge_orig_node(struct bat_priv *bat_priv, update_routes(bat_priv, orig_node, best_neigh_node, orig_node->hna_buff, - orig_node->hna_buff_len); + orig_node->hna_buff_len, + orig_node->mca_buff, orig_node->num_mca); } }
diff --git a/batman-adv/routing.c b/batman-adv/routing.c index 2861f18..81719d9 100644 --- a/batman-adv/routing.c +++ b/batman-adv/routing.c @@ -85,6 +85,34 @@ static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, } }
+/* Copy the mca buffer again if something has changed */ +static void update_MCA(struct orig_node *orig_node, + unsigned char *mca_buff, int num_mca) +{ + /* numbers differ? then reallocate buffer */ + if (num_mca != orig_node->num_mca) { + kfree(orig_node->mca_buff); + if (num_mca > 0) { + orig_node->mca_buff = + kmalloc(num_mca * ETH_ALEN, GFP_ATOMIC); + if (orig_node->mca_buff) + goto update; + } + orig_node->mca_buff = NULL; + orig_node->num_mca = 0; + /* size ok, just update? */ + } else if (num_mca > 0 && + memcmp(orig_node->mca_buff, mca_buff, num_mca * ETH_ALEN)) + goto update; + + /* it's the same, leave it like that */ + return; + +update: + memcpy(orig_node->mca_buff, mca_buff, num_mca * ETH_ALEN); + orig_node->num_mca = num_mca; +} + static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node, @@ -129,7 +157,7 @@ static void update_route(struct bat_priv *bat_priv,
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node, unsigned char *hna_buff, - int hna_buff_len) + int hna_buff_len, unsigned char *mca_buff, int num_mca) {
if (!orig_node) @@ -141,6 +169,8 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, /* may be just HNA changed */ else update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); + + update_MCA(orig_node, mca_buff, num_mca); }
static int is_bidirectional_neigh(struct orig_node *orig_node, @@ -374,6 +404,7 @@ static void update_orig(struct bat_priv *bat_priv, struct hlist_node *node; int tmp_hna_buff_len; uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; + unsigned char *mca_buff;
bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " "Searching and updating originator entry of received packet\n"); @@ -433,6 +464,7 @@ static void update_orig(struct bat_priv *bat_priv,
tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? batman_packet->num_hna * ETH_ALEN : hna_buff_len); + mca_buff = (char *)batman_packet + BAT_PACKET_LEN + tmp_hna_buff_len;
/* if this neighbor already is our next hop there is nothing * to change */ @@ -465,12 +497,14 @@ static void update_orig(struct bat_priv *bat_priv, }
update_routes(bat_priv, orig_node, neigh_node, - hna_buff, tmp_hna_buff_len); + hna_buff, tmp_hna_buff_len, mca_buff, + batman_packet->num_mca); goto update_gw;
update_hna: update_routes(bat_priv, orig_node, orig_node->router, - hna_buff, tmp_hna_buff_len); + hna_buff, tmp_hna_buff_len, mca_buff, + batman_packet->num_mca);
update_gw: if (orig_node->gw_flags != batman_packet->gw_flags) diff --git a/batman-adv/routing.h b/batman-adv/routing.h index 8b76424..d138644 100644 --- a/batman-adv/routing.h +++ b/batman-adv/routing.h @@ -31,7 +31,7 @@ void receive_bat_packet(struct ethhdr *ethhdr, struct batman_if *if_incoming); void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node *neigh_node, unsigned char *hna_buff, - int hna_buff_len); + int hna_buff_len, unsigned char *mca_buff, int num_mca); int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, int hdr_size); int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if); diff --git a/batman-adv/types.h b/batman-adv/types.h index 6bd74c1..f24798e 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -77,6 +77,8 @@ struct orig_node { uint8_t flags; unsigned char *hna_buff; int16_t hna_buff_len; + unsigned char *mca_buff; + uint8_t num_mca; uint32_t last_real_seqno; uint8_t last_ttl; unsigned long bcast_bits[NUM_WORDS];
This commit introduces batman multicast tracker packets. Their job is to mark nodes responsible for forwarding multicast data later (so a multicast receiver will not be marked, only the forwarding nodes).
When the proact_tracking multicast mode has been activated, a path between all multicast _receivers_ of a group will be marked - in fact, in this mode BATMAN will assume that a multicast receiver is also a multicast sender, therefore a multicast sender should also join the same multicast group.
The advantage of this is less complexity and the paths are marked in advance before an actual data packet has been sent, decreasing delays. The disadvantage is higher protocol overhead.
One large tracker packet will be created on a generating node first, which then gets split for every necessary next hop destination.
This commit does not add forwarding of tracker packets but just local generation and local sending of them.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- multicast.c | 506 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ multicast.h | 2 + packet.h | 29 +++- 3 files changed, 531 insertions(+), 6 deletions(-)
diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index cc83937..f414394 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -21,6 +21,82 @@
#include "main.h" #include "multicast.h" +#include "hash.h" +#include "send.h" +#include "hard-interface.h" +#include "originator.h" +#include "compat.h" + +struct tracker_packet_state { + int mcast_num, dest_num; + struct mcast_entry *mcast_entry; + uint8_t *dest_entry; + int break_flag; +}; + +static void init_state_mcast_entry(struct tracker_packet_state *state, + struct mcast_tracker_packet *tracker_packet) +{ + state->mcast_num = 0; + state->mcast_entry = (struct mcast_entry *)(tracker_packet + 1); + state->dest_entry = (uint8_t *)(state->mcast_entry + 1); + state->break_flag = 0; +} + +static int check_state_mcast_entry(struct tracker_packet_state *state, + struct mcast_tracker_packet *tracker_packet) +{ + if (state->mcast_num < tracker_packet->num_mcast_entries && + !state->break_flag) + return 1; + + return 0; +} + +static void inc_state_mcast_entry(struct tracker_packet_state *state) +{ + if (state->break_flag) + return; + + state->mcast_num++; + state->mcast_entry = (struct mcast_entry *)state->dest_entry; + state->dest_entry = (uint8_t *)(state->mcast_entry + 1); +} + +static void init_state_dest_entry(struct tracker_packet_state *state) +{ + state->dest_num = 0; + state->break_flag = 1; +} + +static int check_state_dest_entry(struct tracker_packet_state *state) +{ + if (state->dest_num < state->mcast_entry->num_dest) + return 1; + + state->break_flag = 0; + return 0; +} + +static void inc_state_dest_entry(struct tracker_packet_state *state) +{ + state->dest_num++; + state->dest_entry += ETH_ALEN; +} + +#define tracker_packet_for_each_dest(state, tracker_packet) \ + for (init_state_mcast_entry(state, tracker_packet); \ + check_state_mcast_entry(state, tracker_packet); \ + inc_state_mcast_entry(state)) \ + for (init_state_dest_entry(state); \ + check_state_dest_entry(state); \ + inc_state_dest_entry(state)) + +struct dest_entries_list { + struct list_head list; + uint8_t dest[6]; + struct batman_if *batman_if; +};
/* how long to wait until sending a multicast tracker packet */ static int tracker_send_delay(struct bat_priv *bat_priv) @@ -56,11 +132,441 @@ void mcast_tracker_reset(struct bat_priv *bat_priv) start_mcast_tracker(bat_priv); }
+/** + * Searches if a certain multicast address of another originator is also + * one of ours. + * + * Returns -1 if no match could be found. Otherwise returns the number of + * the element in our mc_addr_list which matches. + * + * @orig_node: the originator we are refering to + * @mca_pos: refers to the specific multicast address in orig_node's + * mca buffer which we are trying to find a match for + * @mc_addr_list: a list of our own multicast addresses + * @num_mcast_entries: the number of our own multicast addresses + */ +static inline int find_mca_match(struct orig_node *orig_node, + int mca_pos, uint8_t *mc_addr_list, int num_mcast_entries) +{ + int pos; + + for (pos = 0; pos < num_mcast_entries; pos++) + if (!memcmp(&mc_addr_list[pos*ETH_ALEN], + &orig_node->mca_buff[ETH_ALEN*mca_pos], ETH_ALEN)) + return pos; + return -1; +} + +static struct sk_buff *build_tracker_packet_skb(int tracker_packet_len, + int used_mcast_entries, + struct bat_priv *bat_priv) +{ + struct sk_buff *skb; + struct mcast_tracker_packet *tracker_packet; + + skb = dev_alloc_skb(tracker_packet_len + sizeof(struct ethhdr)); + if (!skb) + return NULL; + + skb_reserve(skb, sizeof(struct ethhdr)); + tracker_packet = (struct mcast_tracker_packet*) skb_put(skb, tracker_packet_len); + + tracker_packet->packet_type = BAT_MCAST_TRACKER; + tracker_packet->version = COMPAT_VERSION; + memcpy(tracker_packet->orig, bat_priv->primary_if->net_dev->dev_addr, + ETH_ALEN); + tracker_packet->ttl = TTL; + tracker_packet->num_mcast_entries = (used_mcast_entries > UINT8_MAX) ? + UINT8_MAX : used_mcast_entries; + memset(tracker_packet->align, 0, sizeof(tracker_packet->align)); + + return skb; +} + +/** + * Prepares a multicast tracker packet on a multicast member with all its + * groups and their members attached. Note, that the proactive tracking + * mode does not differentiate between multicast senders and receivers, + * resulting in tracker packets between each node. 
+ * + * Returns NULL if this node is not a member of any group or if there are + * no other members in its groups. + * + * @bat_priv: bat_priv for the mesh we are preparing this packet + */ +static struct sk_buff *mcast_proact_tracker_prepare(struct bat_priv *bat_priv) +{ + struct net_device *soft_iface = bat_priv->primary_if->soft_iface; + uint8_t *mc_addr_list; + struct netdev_hw_addr *mc_entry; + struct element_t *bucket; + struct orig_node *orig_node; + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *walk; + struct hlist_head *head; + int i, tracker_packet_len; + + /* one dest_entries_buckets[x] per multicast group, + * they'll collect dest_entries[y] items */ + int num_mcast_entries, used_mcast_entries = 0; + struct list_head *dest_entries_buckets; + struct dest_entries_list *dest_entries, *dest, *tmp; + int dest_entries_total = 0; + + uint8_t *dest_entry; + int pos, mca_pos; + struct sk_buff *skb; + struct mcast_entry *mcast_entry; + + if (!hash) + goto out; + + dest_entries = kmalloc(sizeof(struct dest_entries_list) * UINT8_MAX, + GFP_ATOMIC); + if (!dest_entries) + goto out; + + /* Make a copy so we don't have to rush because of locking */ + netif_addr_lock_bh(soft_iface); + num_mcast_entries = netdev_mc_count(soft_iface); + mc_addr_list = kmalloc(ETH_ALEN * num_mcast_entries, GFP_ATOMIC); + if (!mc_addr_list) { + netif_addr_unlock_bh(soft_iface); + goto free; + } + pos = 0; + netdev_for_each_mc_addr(mc_entry, soft_iface) { + memcpy(&mc_addr_list[pos * ETH_ALEN], mc_entry->addr, + ETH_ALEN); + pos++; + } + netif_addr_unlock_bh(soft_iface); + + if (num_mcast_entries > UINT8_MAX) + num_mcast_entries = UINT8_MAX; + dest_entries_buckets = kmalloc(num_mcast_entries * + sizeof(struct list_head), GFP_ATOMIC); + if (!dest_entries_buckets) + goto free2; + + for (pos = 0; pos < num_mcast_entries; pos++) + INIT_LIST_HEAD(&dest_entries_buckets[pos]); + + /* Collect items from every orig_node's mca buffer if matching one of + * our own multicast 
groups' address in a dest_entries[x] and throw + * them in the according multicast group buckets + * (dest_entries_buckets[y]) */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(bucket, walk, head, hlist) { + orig_node = bucket->data; + if (!orig_node->num_mca) + continue; + + for (mca_pos = 0; mca_pos < orig_node->num_mca && + dest_entries_total != UINT8_MAX; mca_pos++) { + pos = find_mca_match(orig_node, mca_pos, + mc_addr_list, num_mcast_entries); + if (pos > num_mcast_entries || pos < 0) + continue; + memcpy(dest_entries[dest_entries_total].dest, + orig_node->orig, ETH_ALEN); + list_add( + &dest_entries[dest_entries_total].list, + &dest_entries_buckets[pos]); + + dest_entries_total++; + } + } + rcu_read_unlock(); + } + + /* Any list left empty? */ + for (pos = 0; pos < num_mcast_entries; pos++) + if (!list_empty(&dest_entries_buckets[pos])) + used_mcast_entries++; + + if (!used_mcast_entries) + goto free_all; + + /* prepare tracker packet, finally! 
*/ + tracker_packet_len = sizeof(struct mcast_tracker_packet) + + used_mcast_entries * sizeof(struct mcast_entry) + + ETH_ALEN * dest_entries_total; + if (tracker_packet_len > ETH_DATA_LEN) { + pr_warning("mcast tracker packet got too large (%i Bytes), " + "forcing reduced size of %i Bytes\n", + tracker_packet_len, ETH_DATA_LEN); + tracker_packet_len = ETH_DATA_LEN; + } + + skb = build_tracker_packet_skb(tracker_packet_len, used_mcast_entries, + bat_priv); + if (!skb) + goto free_all; + + /* append all collected entries */ + mcast_entry = (struct mcast_entry *) + (skb->data + sizeof(struct mcast_tracker_packet)); + for (pos = 0; pos < num_mcast_entries; pos++) { + if (list_empty(&dest_entries_buckets[pos])) + continue; + + if ((unsigned char *)(mcast_entry + 1) <= + skb_tail_pointer(skb)) { + memcpy(mcast_entry->mcast_addr, + &mc_addr_list[pos*ETH_ALEN], ETH_ALEN); + mcast_entry->num_dest = 0; + mcast_entry->align = 0; + } + + dest_entry = (uint8_t *)(mcast_entry + 1); + list_for_each_entry_safe(dest, tmp, &dest_entries_buckets[pos], + list) { + /* still place for a dest_entry left? + * watch out for overflow here, stop at UINT8_MAX */ + if ((unsigned char *)dest_entry + ETH_ALEN <= + skb_tail_pointer(skb) && + mcast_entry->num_dest != UINT8_MAX) { + mcast_entry->num_dest++; + memcpy(dest_entry, dest->dest, ETH_ALEN); + dest_entry += ETH_ALEN; + } + list_del(&dest->list); + } + /* still space for another mcast_entry left? 
*/ + if ((unsigned char *)(mcast_entry + 1) <= + skb_tail_pointer(skb)) + mcast_entry = (struct mcast_entry *)dest_entry; + } + + + /* outstanding cleanup */ +free_all: + kfree(dest_entries_buckets); +free2: + kfree(mc_addr_list); +free: + kfree(dest_entries); +out: + + return skb; +} + +/* Adds the router for the destination address to the next_hop list and its + * interface to the forw_if_list - but only if this router has not been + * added yet */ +static int add_router_of_dest(struct dest_entries_list *next_hops, + uint8_t *dest, struct bat_priv *bat_priv) +{ + struct dest_entries_list *next_hop_tmp, *next_hop_entry; + struct orig_node *orig_node; + int ret = 1; + + + next_hop_entry = kmalloc(sizeof(struct dest_entries_list), GFP_ATOMIC); + if (!next_hop_entry) + goto out; + + rcu_read_lock(); + orig_node = hash_find(bat_priv->orig_hash, compare_orig, choose_orig, + dest); + if (!orig_node || !orig_node->router || + !orig_node->router->if_incoming) { + rcu_read_unlock(); + goto free; + } + + memcpy(next_hop_entry->dest, orig_node->router->addr, + ETH_ALEN); + next_hop_entry->batman_if = orig_node->router->if_incoming; + kref_get(&next_hop_entry->batman_if->refcount); + rcu_read_unlock(); + + list_for_each_entry(next_hop_tmp, &next_hops->list, list) + if (!memcmp(next_hop_tmp->dest, next_hop_entry->dest, + ETH_ALEN)) + goto kref_free; + + list_add(&next_hop_entry->list, &next_hops->list); + + ret = 0; + goto out; + +kref_free: + kref_put(&next_hop_entry->batman_if->refcount, hardif_free_ref); +free: + kfree(next_hop_entry); +out: + return ret; +} + +/* Collect nexthops for all dest entries specified in this tracker packet */ +static int tracker_next_hops(struct mcast_tracker_packet *tracker_packet, + struct dest_entries_list *next_hops, + struct bat_priv *bat_priv) +{ + int num_next_hops = 0, ret; + struct tracker_packet_state state; + + INIT_LIST_HEAD(&next_hops->list); + + tracker_packet_for_each_dest(&state, tracker_packet) { + ret = 
add_router_of_dest(next_hops, state.dest_entry, + bat_priv); + if (!ret) + num_next_hops++; + } + + return num_next_hops; +} + +/* Zero destination entries not destined for the specified next hop in the + * tracker packet */ +static void zero_tracker_packet(struct mcast_tracker_packet *tracker_packet, + uint8_t *next_hop, struct bat_priv *bat_priv) +{ + struct tracker_packet_state state; + struct orig_node *orig_node; + + tracker_packet_for_each_dest(&state, tracker_packet) { + rcu_read_lock(); + orig_node = hash_find(bat_priv->orig_hash, compare_orig, + choose_orig, state.dest_entry); + + /* we don't know this destination */ + if (!orig_node || + /* is the next hop already our destination? */ + !memcmp(orig_node->orig, next_hop, ETH_ALEN) || + !orig_node->router || + !memcmp(orig_node->router->orig_node->primary_addr, + orig_node->orig, ETH_ALEN) || + /* is this the wrong next hop for our + * destination? */ + memcmp(orig_node->router->addr, next_hop, ETH_ALEN)) + memset(state.dest_entry, '\0', ETH_ALEN); + + rcu_read_unlock(); + } +} + +/* Remove zeroed destination entries and empty multicast entries in tracker + * packet */ +static void shrink_tracker_packet(struct sk_buff *skb) +{ + struct mcast_tracker_packet *tracker_packet = + (struct mcast_tracker_packet*)skb->data; + struct tracker_packet_state state; + unsigned char *tail = skb_tail_pointer(skb); + int new_tracker_packet_len = sizeof(struct mcast_tracker_packet); + + tracker_packet_for_each_dest(&state, tracker_packet) { + if (memcmp(state.dest_entry, "\0\0\0\0\0\0", ETH_ALEN)) { + new_tracker_packet_len += ETH_ALEN; + continue; + } + + memmove(state.dest_entry, state.dest_entry + ETH_ALEN, + tail - state.dest_entry - ETH_ALEN); + + state.mcast_entry->num_dest--; + tail -= ETH_ALEN; + + if (state.mcast_entry->num_dest) { + state.dest_num--; + state.dest_entry -= ETH_ALEN; + continue; + } + + /* = mcast_entry */ + state.dest_entry -= sizeof(struct mcast_entry); + + memmove(state.dest_entry, 
state.dest_entry + + sizeof(struct mcast_entry), + tail - state.dest_entry - sizeof(struct mcast_entry)); + + tracker_packet->num_mcast_entries--; + tail -= sizeof(struct mcast_entry); + + state.mcast_num--; + + /* Avoid mcast_entry check of tracker_packet_for_each_dest's + * inner loop */ + state.break_flag = 0; + break; + } + + new_tracker_packet_len += sizeof(struct mcast_entry) * + tracker_packet->num_mcast_entries; + + skb_trim(skb, new_tracker_packet_len); +} + + +/** + * Sends (splitted parts of) a multicast tracker packet on the according + * interfaces. + * + * @tracker_packet: A compact multicast tracker packet with all groups and + * destinations attached. + */ +void route_mcast_tracker_packet(struct sk_buff *skb, + struct bat_priv *bat_priv) +{ + struct dest_entries_list next_hops, *tmp; + struct dest_entries_list *next_hop; + struct sk_buff *skb_tmp; + int num_next_hops; + + num_next_hops = tracker_next_hops((struct mcast_tracker_packet*) + skb->data, &next_hops, bat_priv); + if (!num_next_hops) + return; + + list_for_each_entry(next_hop, &next_hops.list, list) { + skb_tmp = skb_copy(skb, GFP_ATOMIC); + if (!skb_tmp) + goto free; + + /* cut the tracker packets for the according destinations */ + zero_tracker_packet((struct mcast_tracker_packet*) + skb_tmp->data, next_hop->dest, bat_priv); + shrink_tracker_packet(skb_tmp); + if (skb_tmp->len == sizeof(struct mcast_tracker_packet)) { + dev_kfree_skb(skb_tmp); + continue; + } + + /* Send 'em! */ + send_skb_packet(skb_tmp, next_hop->batman_if, next_hop->dest); + } + +free: + list_for_each_entry_safe(next_hop, tmp, &next_hops.list, list) { + kref_put(&next_hop->batman_if->refcount, hardif_free_ref); + list_del(&next_hop->list); + kfree(next_hop); + } +} + static void mcast_tracker_timer(struct work_struct *work) { struct bat_priv *bat_priv = container_of(work, struct bat_priv, mcast_tracker_work.work); + struct sk_buff *tracker_packet = NULL;
+ if (atomic_read(&bat_priv->mcast_mode) == MCAST_MODE_PROACT_TRACKING) + tracker_packet = mcast_proact_tracker_prepare(bat_priv); + + if (!tracker_packet) + goto out; + + route_mcast_tracker_packet(tracker_packet, bat_priv); + dev_kfree_skb(tracker_packet); + +out: start_mcast_tracker(bat_priv); }
diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h index 26ce6d8..d1a3e83 100644 --- a/batman-adv/multicast.h +++ b/batman-adv/multicast.h @@ -27,6 +27,8 @@ int mcast_tracker_interval_set(struct net_device *net_dev, char *buff, int mcast_tracker_timeout_set(struct net_device *net_dev, char *buff, size_t count); void mcast_tracker_reset(struct bat_priv *bat_priv); +void route_mcast_tracker_packet(struct sk_buff *tracker_packet, + struct bat_priv *bat_priv); int mcast_init(struct bat_priv *bat_priv); void mcast_free(struct bat_priv *bat_priv);
diff --git a/batman-adv/packet.h b/batman-adv/packet.h index d77af90..6f42a2f 100644 --- a/batman-adv/packet.h +++ b/batman-adv/packet.h @@ -24,12 +24,13 @@
#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
-#define BAT_PACKET 0x01 -#define BAT_ICMP 0x02 -#define BAT_UNICAST 0x03 -#define BAT_BCAST 0x04 -#define BAT_VIS 0x05 -#define BAT_UNICAST_FRAG 0x06 +#define BAT_PACKET 0x01 +#define BAT_ICMP 0x02 +#define BAT_UNICAST 0x03 +#define BAT_BCAST 0x04 +#define BAT_VIS 0x05 +#define BAT_UNICAST_FRAG 0x06 +#define BAT_MCAST_TRACKER 0x07
/* this file is included by batctl which needs these defines */ #define COMPAT_VERSION 14 @@ -125,6 +126,22 @@ struct bcast_packet { uint32_t seqno; } __packed;
+/* marks the path for multicast streams */ +struct mcast_tracker_packet { + uint8_t packet_type; /* BAT_MCAST_TRACKER */ + uint8_t version; /* batman version field */ + uint8_t orig[6]; + uint8_t ttl; + uint8_t num_mcast_entries; + uint8_t align[2]; +} __packed; + +struct mcast_entry { + uint8_t mcast_addr[6]; + uint8_t num_dest; /* number of multicast data receivers */ + uint8_t align; +} __packed; + struct vis_packet { uint8_t packet_type; uint8_t version; /* batman version field */
Before/while a tracker packet is being searched for next hops for its destination entries, it will also be checked whether the number of destination and mcast entries might exceed the tracker_packet_len. Otherwise we might read/write in unallocated memory. Such a broken tracker packet could potentially occur when we are going to reuse route_mcast_tracker_packet for tracker packets received from a neighbour node.
In such a case, we are just reducing the stated mcast / dest numbers in the tracker packet to fit the size of the allocated buffer.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- multicast.c | 25 +++++++++++++++++++++++-- 1 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index f414394..ffeb800 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -403,17 +403,38 @@ out: return ret; }
-/* Collect nexthops for all dest entries specified in this tracker packet */ +/* Collect nexthops for all dest entries specified in this tracker packet. + * It also reduces the number of elements in the tracker packet if they exceed + * the buffers length (e.g. because of a received, broken tracker packet) to + * avoid writing in unallocated memory. */ static int tracker_next_hops(struct mcast_tracker_packet *tracker_packet, + int tracker_packet_len, struct dest_entries_list *next_hops, struct bat_priv *bat_priv) { int num_next_hops = 0, ret; struct tracker_packet_state state; + uint8_t *tail = (uint8_t *)tracker_packet + tracker_packet_len;
INIT_LIST_HEAD(&next_hops->list);
tracker_packet_for_each_dest(&state, tracker_packet) { + /* avoid writing outside of unallocated memory later */ + if (state.dest_entry + ETH_ALEN > tail) { + bat_dbg(DBG_BATMAN, bat_priv, + "mcast tracker packet is broken, too many " + "entries claimed for its length, repairing"); + + tracker_packet->num_mcast_entries = state.mcast_num; + + if (state.dest_num) { + tracker_packet->num_mcast_entries++; + state.mcast_entry->num_dest = state.dest_num; + } + + break; + } + ret = add_router_of_dest(next_hops, state.dest_entry, bat_priv); if (!ret) @@ -521,7 +542,7 @@ void route_mcast_tracker_packet(struct sk_buff *skb, int num_next_hops;
num_next_hops = tracker_next_hops((struct mcast_tracker_packet*) - skb->data, &next_hops, bat_priv); + skb->data, skb->len, &next_hops, bat_priv); if (!num_next_hops) return;
This commit adds the ability to also forward a received multicast tracker packet (if necessary). It also makes use of the same splitting methods introduced with one of the previous commits, in case of multiple next hop destinations.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- hard-interface.c | 5 +++++ routing.c | 20 ++++++++++++++++++++ routing.h | 1 + 3 files changed, 26 insertions(+), 0 deletions(-)
diff --git a/batman-adv/hard-interface.c b/batman-adv/hard-interface.c index 1455e7f..8fa8ff7 100644 --- a/batman-adv/hard-interface.c +++ b/batman-adv/hard-interface.c @@ -631,6 +631,11 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, ret = recv_bcast_packet(skb, batman_if); break;
+ /* multicast tracker packet */ + case BAT_MCAST_TRACKER: + ret = recv_mcast_tracker_packet(skb, batman_if); + break; + /* vis packet */ case BAT_VIS: ret = recv_vis_packet(skb, batman_if); diff --git a/batman-adv/routing.c b/batman-adv/routing.c index 81719d9..60dcf39 100644 --- a/batman-adv/routing.c +++ b/batman-adv/routing.c @@ -35,6 +35,7 @@ #include "gateway_common.h" #include "gateway_client.h" #include "unicast.h" +#include "multicast.h"
void slide_own_bcast_window(struct batman_if *batman_if) { @@ -1496,6 +1497,25 @@ out: return ret; }
+int recv_mcast_tracker_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + int hdr_size = sizeof(struct mcast_tracker_packet); + + /* keep skb linear */ + if (skb_linearize(skb) < 0) + return NET_RX_DROP; + + if (check_unicast_packet(skb, hdr_size) < 0) + return NET_RX_DROP; + + route_mcast_tracker_packet(skb, bat_priv); + + dev_kfree_skb(skb); + + return NET_RX_SUCCESS; +} + int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) { struct vis_packet *vis_packet; diff --git a/batman-adv/routing.h b/batman-adv/routing.h index d138644..83f2752 100644 --- a/batman-adv/routing.h +++ b/batman-adv/routing.h @@ -38,6 +38,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); +int recv_mcast_tracker_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); struct neigh_node *find_router(struct bat_priv *bat_priv,
On reception of a multicast tracker packet (both locally generated or received over an interface), a node now memorizes its forwarding state for a tuple of multicast-group, originator, and next-hops (+ their according outgoing interface).
The first two elements are necessary to determine, whether a node shall forward a multicast data packet on reception later. The next-hop and according interface information is necessary to quickly determine, if a multicast data packet shall be forwarded via unicast to each single next hop or via broadcast.
This commit does not yet purge multicast forwarding table entries after the configured tracker timeout.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- main.c | 1 + multicast.c | 278 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++- types.h | 2 + 3 files changed, 278 insertions(+), 3 deletions(-)
diff --git a/batman-adv/main.c b/batman-adv/main.c index 4233c7b..b2f3e62 100644 --- a/batman-adv/main.c +++ b/batman-adv/main.c @@ -83,6 +83,7 @@ int mesh_init(struct net_device *soft_iface)
spin_lock_init(&bat_priv->forw_bat_list_lock); spin_lock_init(&bat_priv->forw_bcast_list_lock); + spin_lock_init(&bat_priv->mcast_forw_table_lock); spin_lock_init(&bat_priv->hna_lhash_lock); spin_lock_init(&bat_priv->hna_ghash_lock); spin_lock_init(&bat_priv->gw_list_lock); diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index ffeb800..b1718c6 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -27,6 +27,10 @@ #include "originator.h" #include "compat.h"
+/* If auto mode for tracker timeout has been selected, + * how many times of tracker_interval to wait */ +#define TRACKER_TIMEOUT_AUTO_X 5 + struct tracker_packet_state { int mcast_num, dest_num; struct mcast_entry *mcast_entry; @@ -98,6 +102,34 @@ struct dest_entries_list { struct batman_if *batman_if; };
+ +struct mcast_forw_nexthop_entry { + struct hlist_node list; + uint8_t neigh_addr[6]; + unsigned long timeout; /* old jiffies value */ +}; + +struct mcast_forw_if_entry { + struct hlist_node list; + int16_t if_num; + int num_nexthops; + struct hlist_head mcast_nexthop_list; +}; + +struct mcast_forw_orig_entry { + struct hlist_node list; + uint8_t orig[6]; + uint32_t last_mcast_seqno; + unsigned long mcast_bits[NUM_WORDS]; + struct hlist_head mcast_if_list; +}; + +struct mcast_forw_table_entry { + struct hlist_node list; + uint8_t mcast_addr[6]; + struct hlist_head mcast_orig_list; +}; + /* how long to wait until sending a multicast tracker packet */ static int tracker_send_delay(struct bat_priv *bat_priv) { @@ -132,6 +164,222 @@ void mcast_tracker_reset(struct bat_priv *bat_priv) start_mcast_tracker(bat_priv); }
+static void prepare_forw_if_entry(struct hlist_head *forw_if_list, + int16_t if_num, uint8_t *neigh_addr) +{ + struct mcast_forw_if_entry *forw_if_entry; + struct mcast_forw_nexthop_entry *forw_nexthop_entry; + struct hlist_node *node; + + hlist_for_each_entry(forw_if_entry, node, forw_if_list, list) + if (forw_if_entry->if_num == if_num) + goto skip_create_if; + + forw_if_entry = kmalloc(sizeof(struct mcast_forw_if_entry), + GFP_ATOMIC); + if (!forw_if_entry) + return; + + forw_if_entry->if_num = if_num; + forw_if_entry->num_nexthops = 0; + INIT_HLIST_HEAD(&forw_if_entry->mcast_nexthop_list); + hlist_add_head(&forw_if_entry->list, forw_if_list); + +skip_create_if: + hlist_for_each_entry(forw_nexthop_entry, node, + &forw_if_entry->mcast_nexthop_list, list) { + if (!memcmp(forw_nexthop_entry->neigh_addr, + neigh_addr, ETH_ALEN)) + return; + } + + forw_nexthop_entry = kmalloc(sizeof(struct mcast_forw_nexthop_entry), + GFP_ATOMIC); + if (!forw_nexthop_entry && forw_if_entry->num_nexthops) + return; + else if (!forw_nexthop_entry) + goto free; + + memcpy(forw_nexthop_entry->neigh_addr, neigh_addr, ETH_ALEN); + forw_if_entry->num_nexthops++; + if (forw_if_entry->num_nexthops < 0) { + kfree(forw_nexthop_entry); + goto free; + } + + hlist_add_head(&forw_nexthop_entry->list, + &forw_if_entry->mcast_nexthop_list); + return; +free: + hlist_del(&forw_if_entry->list); + kfree(forw_if_entry); +} + +static struct hlist_head *prepare_forw_table_entry( + struct hlist_head *forw_table, + uint8_t *mcast_addr, uint8_t *orig) +{ + struct mcast_forw_table_entry *forw_table_entry; + struct mcast_forw_orig_entry *orig_entry; + + forw_table_entry = kmalloc(sizeof(struct mcast_forw_table_entry), + GFP_ATOMIC); + if (!forw_table_entry) + return NULL; + + memcpy(forw_table_entry->mcast_addr, mcast_addr, ETH_ALEN); + hlist_add_head(&forw_table_entry->list, forw_table); + + INIT_HLIST_HEAD(&forw_table_entry->mcast_orig_list); + orig_entry = kmalloc(sizeof(struct mcast_forw_orig_entry), 
GFP_ATOMIC); + if (!orig_entry) + goto free; + + memcpy(orig_entry->orig, orig, ETH_ALEN); + INIT_HLIST_HEAD(&orig_entry->mcast_if_list); + hlist_add_head(&orig_entry->list, &forw_table_entry->mcast_orig_list); + + return &orig_entry->mcast_if_list; + +free: + hlist_del(&forw_table_entry->list); + kfree(forw_table_entry); + return NULL; +} + +static int sync_nexthop(struct mcast_forw_nexthop_entry *sync_nexthop_entry, + struct hlist_head *nexthop_list) +{ + struct mcast_forw_nexthop_entry *nexthop_entry; + struct hlist_node *node; + int synced = 0; + + hlist_for_each_entry(nexthop_entry, node, nexthop_list, list) { + if (memcmp(sync_nexthop_entry->neigh_addr, + nexthop_entry->neigh_addr, ETH_ALEN)) + continue; + + nexthop_entry->timeout = jiffies; + hlist_del(&sync_nexthop_entry->list); + kfree(sync_nexthop_entry); + + synced = 1; + break; + } + + if (!synced) { + sync_nexthop_entry->timeout = jiffies; + hlist_add_head(&sync_nexthop_entry->list, nexthop_list); + return 1; + } + + return 0; +} + +static void sync_if(struct mcast_forw_if_entry *sync_if_entry, + struct hlist_head *if_list) +{ + struct mcast_forw_if_entry *if_entry; + struct mcast_forw_nexthop_entry *sync_nexthop_entry; + struct hlist_node *node, *node2, *node_tmp; + int synced = 0; + + hlist_for_each_entry(if_entry, node, if_list, list) { + if (sync_if_entry->if_num != if_entry->if_num) + continue; + + hlist_for_each_entry_safe(sync_nexthop_entry, node2, node_tmp, + &sync_if_entry->mcast_nexthop_list, list) + if (sync_nexthop(sync_nexthop_entry, + &if_entry->mcast_nexthop_list)) + if_entry->num_nexthops++; + + hlist_del(&sync_if_entry->list); + kfree(sync_if_entry); + + synced = 1; + break; + } + + if (!synced) + hlist_add_head(&sync_if_entry->list, if_list); +} + +static void sync_orig(struct mcast_forw_orig_entry *sync_orig_entry, + struct hlist_head *orig_list) +{ + struct mcast_forw_orig_entry *orig_entry; + struct mcast_forw_if_entry *sync_if_entry; + struct hlist_node *node, *node2, *node_tmp; + 
int synced = 0; + + hlist_for_each_entry(orig_entry, node, orig_list, list) { + if (memcmp(sync_orig_entry->orig, + orig_entry->orig, ETH_ALEN)) + continue; + + hlist_for_each_entry_safe(sync_if_entry, node2, node_tmp, + &sync_orig_entry->mcast_if_list, list) + sync_if(sync_if_entry, &orig_entry->mcast_if_list); + + hlist_del(&sync_orig_entry->list); + kfree(sync_orig_entry); + + synced = 1; + break; + } + + if (!synced) + hlist_add_head(&sync_orig_entry->list, orig_list); +} + + +/* syncs all multicast entries of sync_table_entry to forw_table */ +static void sync_table(struct mcast_forw_table_entry *sync_table_entry, + struct hlist_head *forw_table) +{ + struct mcast_forw_table_entry *table_entry; + struct mcast_forw_orig_entry *sync_orig_entry; + struct hlist_node *node, *node2, *node_tmp; + int synced = 0; + + hlist_for_each_entry(table_entry, node, forw_table, list) { + if (memcmp(sync_table_entry->mcast_addr, + table_entry->mcast_addr, ETH_ALEN)) + continue; + + hlist_for_each_entry_safe(sync_orig_entry, node2, node_tmp, + &sync_table_entry->mcast_orig_list, list) + sync_orig(sync_orig_entry, + &table_entry->mcast_orig_list); + + hlist_del(&sync_table_entry->list); + kfree(sync_table_entry); + + synced = 1; + break; + } + + if (!synced) + hlist_add_head(&sync_table_entry->list, forw_table); +} + +/* Updates the old multicast forwarding table with the information gained + * from the generated/received tracker packet. It also frees the generated + * table for syncing (*forw_table). 
*/ +static void update_mcast_forw_table(struct hlist_head *forw_table, + struct bat_priv *bat_priv) +{ + struct mcast_forw_table_entry *sync_table_entry; + struct hlist_node *node, *node_tmp; + + spin_lock_bh(&bat_priv->mcast_forw_table_lock); + hlist_for_each_entry_safe(sync_table_entry, node, node_tmp, forw_table, + list) + sync_table(sync_table_entry, &bat_priv->mcast_forw_table); + spin_unlock_bh(&bat_priv->mcast_forw_table_lock); +} + /** * Searches if a certain multicast address of another originator is also * one of ours. @@ -359,9 +607,12 @@ out: * interface to the forw_if_list - but only if this router has not been * added yet */ static int add_router_of_dest(struct dest_entries_list *next_hops, - uint8_t *dest, struct bat_priv *bat_priv) + uint8_t *dest, + struct hlist_head *forw_if_list, + struct bat_priv *bat_priv) { struct dest_entries_list *next_hop_tmp, *next_hop_entry; + int16_t if_num; struct orig_node *orig_node; int ret = 1;
@@ -382,9 +633,14 @@ static int add_router_of_dest(struct dest_entries_list *next_hops, memcpy(next_hop_entry->dest, orig_node->router->addr, ETH_ALEN); next_hop_entry->batman_if = orig_node->router->if_incoming; + if_num = next_hop_entry->batman_if->if_num; kref_get(&next_hop_entry->batman_if->refcount); rcu_read_unlock();
+ if (forw_if_list) + prepare_forw_if_entry(forw_if_list, if_num, + next_hop_entry->dest); + list_for_each_entry(next_hop_tmp, &next_hops->list, list) if (!memcmp(next_hop_tmp->dest, next_hop_entry->dest, ETH_ALEN)) @@ -410,13 +666,16 @@ out: static int tracker_next_hops(struct mcast_tracker_packet *tracker_packet, int tracker_packet_len, struct dest_entries_list *next_hops, + struct hlist_head *forw_table, struct bat_priv *bat_priv) { int num_next_hops = 0, ret; struct tracker_packet_state state; uint8_t *tail = (uint8_t *)tracker_packet + tracker_packet_len; + struct hlist_head *forw_table_if = NULL;
INIT_LIST_HEAD(&next_hops->list); + INIT_HLIST_HEAD(forw_table);
tracker_packet_for_each_dest(&state, tracker_packet) { /* avoid writing outside of unallocated memory later */ @@ -435,8 +694,15 @@ static int tracker_next_hops(struct mcast_tracker_packet *tracker_packet, break; }
+ if (state.dest_num) + goto skip; + + forw_table_if = prepare_forw_table_entry(forw_table, + state.mcast_entry->mcast_addr, + tracker_packet->orig); +skip: ret = add_router_of_dest(next_hops, state.dest_entry, - bat_priv); + forw_table_if, bat_priv); if (!ret) num_next_hops++; } @@ -538,14 +804,18 @@ void route_mcast_tracker_packet(struct sk_buff *skb, { struct dest_entries_list next_hops, *tmp; struct dest_entries_list *next_hop; + struct hlist_head forw_table; struct sk_buff *skb_tmp; int num_next_hops;
num_next_hops = tracker_next_hops((struct mcast_tracker_packet*) - skb->data, skb->len, &next_hops, bat_priv); + skb->data, skb->len, &next_hops, + &forw_table, bat_priv); if (!num_next_hops) return;
+ update_mcast_forw_table(&forw_table, bat_priv); + list_for_each_entry(next_hop, &next_hops.list, list) { skb_tmp = skb_copy(skb, GFP_ATOMIC); if (!skb_tmp) @@ -694,6 +964,8 @@ ok: int mcast_init(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->mcast_tracker_work, mcast_tracker_timer); + INIT_HLIST_HEAD(&bat_priv->mcast_forw_table); + start_mcast_tracker(bat_priv);
return 1; diff --git a/batman-adv/types.h b/batman-adv/types.h index f24798e..0acfd2e 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -160,6 +160,7 @@ struct bat_priv { struct hlist_head forw_bcast_list; struct hlist_head gw_list; struct list_head vis_send_list; + struct hlist_head mcast_forw_table; struct hashtable_t *orig_hash; struct hashtable_t *hna_local_hash; struct hashtable_t *hna_global_hash; @@ -172,6 +173,7 @@ struct bat_priv { spinlock_t vis_hash_lock; /* protects vis_hash */ spinlock_t vis_list_lock; /* protects vis_info::recv_list */ spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ + spinlock_t mcast_forw_table_lock; /* protects mcast_forw_table */ int16_t num_local_hna; atomic_t hna_local_changed; struct delayed_work hna_work;
With this commit the full multicast forwarding table, which is used for determining whether to forward a multicast data packet or not, can now be displayed via mcast_forw_table in BATMAN's debugfs directory.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- bat_debugfs.c | 9 +++++ multicast.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ multicast.h | 1 + 3 files changed, 113 insertions(+), 0 deletions(-)
diff --git a/batman-adv/bat_debugfs.c b/batman-adv/bat_debugfs.c index 0e9d435..30cd4e9 100644 --- a/batman-adv/bat_debugfs.c +++ b/batman-adv/bat_debugfs.c @@ -32,6 +32,7 @@ #include "soft-interface.h" #include "vis.h" #include "icmp_socket.h" +#include "multicast.h"
static struct dentry *bat_debugfs;
@@ -250,6 +251,12 @@ static int transtable_local_open(struct inode *inode, struct file *file) return single_open(file, hna_local_seq_print_text, net_dev); }
+static int mcast_forw_table_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, mcast_forw_table_seq_print_text, net_dev); +} + static int vis_data_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; @@ -278,6 +285,7 @@ static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); +static BAT_DEBUGINFO(mcast_forw_table, S_IRUGO, mcast_forw_table_open); static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
static struct bat_debuginfo *mesh_debuginfos[] = { @@ -286,6 +294,7 @@ static struct bat_debuginfo *mesh_debuginfos[] = { &bat_debuginfo_softif_neigh, &bat_debuginfo_transtable_global, &bat_debuginfo_transtable_local, + &bat_debuginfo_mcast_forw_table, &bat_debuginfo_vis_data, NULL, }; diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index b1718c6..b53825f 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -164,6 +164,24 @@ void mcast_tracker_reset(struct bat_priv *bat_priv) start_mcast_tracker(bat_priv); }
+static inline int get_remaining_timeout( + struct mcast_forw_nexthop_entry *nexthop_entry, + struct bat_priv *bat_priv) +{ + int tracker_timeout = atomic_read(&bat_priv->mcast_tracker_timeout); + if (!tracker_timeout) + tracker_timeout = atomic_read(&bat_priv->mcast_tracker_interval) + * TRACKER_TIMEOUT_AUTO_X; + if (!tracker_timeout) + tracker_timeout = atomic_read(&bat_priv->orig_interval) + * TRACKER_TIMEOUT_AUTO_X / 2; + + tracker_timeout = jiffies_to_msecs(nexthop_entry->timeout) + + tracker_timeout - jiffies_to_msecs(jiffies); + + return (tracker_timeout > 0 ? tracker_timeout : 0); +} + static void prepare_forw_if_entry(struct hlist_head *forw_if_list, int16_t if_num, uint8_t *neigh_addr) { @@ -961,6 +979,91 @@ ok: return count; }
+static inline struct batman_if *if_num_to_batman_if(int16_t if_num) +{ + struct batman_if *batman_if; + + list_for_each_entry_rcu(batman_if, &if_list, list) + if (batman_if->if_num == if_num) + return batman_if; + + return NULL; +} + +static void seq_print_if_entry(struct mcast_forw_if_entry *if_entry, + struct bat_priv *bat_priv, struct seq_file *seq) +{ + struct mcast_forw_nexthop_entry *nexthop_entry; + struct hlist_node *node; + struct batman_if *batman_if; + + rcu_read_lock(); + batman_if = if_num_to_batman_if(if_entry->if_num); + if (!batman_if) { + rcu_read_unlock(); + return; + } + + seq_printf(seq, "\t\t%s\n", batman_if->net_dev->name); + rcu_read_unlock(); + + hlist_for_each_entry(nexthop_entry, node, + &if_entry->mcast_nexthop_list, list) + seq_printf(seq, "\t\t\t%pM - %i\n", nexthop_entry->neigh_addr, + get_remaining_timeout(nexthop_entry, bat_priv)); +} + +static void seq_print_orig_entry(struct mcast_forw_orig_entry *orig_entry, + struct bat_priv *bat_priv, + struct seq_file *seq) +{ + struct mcast_forw_if_entry *if_entry; + struct hlist_node *node; + + seq_printf(seq, "\t%pM\n", orig_entry->orig); + hlist_for_each_entry(if_entry, node, &orig_entry->mcast_if_list, + list) + seq_print_if_entry(if_entry, bat_priv, seq); +} + +static void seq_print_table_entry(struct mcast_forw_table_entry *table_entry, + struct bat_priv *bat_priv, + struct seq_file *seq) +{ + struct mcast_forw_orig_entry *orig_entry; + struct hlist_node *node; + + seq_printf(seq, "%pM\n", table_entry->mcast_addr); + hlist_for_each_entry(orig_entry, node, &table_entry->mcast_orig_list, + list) + seq_print_orig_entry(orig_entry, bat_priv, seq); +} + +int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct mcast_forw_table_entry *table_entry; + struct hlist_node *node; + + seq_printf(seq, "[B.A.T.M.A.N. 
adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", + SOURCE_VERSION, REVISION_VERSION_STR, + bat_priv->primary_if->net_dev->name, + bat_priv->primary_if->net_dev->dev_addr, net_dev->name); + seq_printf(seq, "Multicast group MAC\tOriginator\t" + "Outgoing interface\tNexthop - timeout in msecs\n"); + + spin_lock_bh(&bat_priv->mcast_forw_table_lock); + + hlist_for_each_entry(table_entry, node, &bat_priv->mcast_forw_table, + list) + seq_print_table_entry(table_entry, bat_priv, seq); + + spin_unlock_bh(&bat_priv->mcast_forw_table_lock); + + return 0; +} + int mcast_init(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->mcast_tracker_work, mcast_tracker_timer); diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h index d1a3e83..63d0e97 100644 --- a/batman-adv/multicast.h +++ b/batman-adv/multicast.h @@ -29,6 +29,7 @@ int mcast_tracker_timeout_set(struct net_device *net_dev, char *buff, void mcast_tracker_reset(struct bat_priv *bat_priv); void route_mcast_tracker_packet(struct sk_buff *tracker_packet, struct bat_priv *bat_priv); +int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset); int mcast_init(struct bat_priv *bat_priv); void mcast_free(struct bat_priv *bat_priv);
With this commit, the multicast forwarding table, which has previously been filled up due to multicast tracker packets, will now be checked frequently (once per second) for timed-out entries. Any entries found to have timed out get removed from the table.
Note that a more frequent check interval is not necessary: multicast data will only be forwarded if a matching entry exists in the table and that entry has not timed out yet, so the timeout is also enforced at forwarding time.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- multicast.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ multicast.h | 1 + originator.c | 2 + 3 files changed, 78 insertions(+), 0 deletions(-)
diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index b53825f..fefef41 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -860,6 +860,81 @@ free: } }
+static void purge_mcast_nexthop_list(struct hlist_head *mcast_nexthop_list, + int *num_nexthops, + struct bat_priv *bat_priv) +{ + struct mcast_forw_nexthop_entry *nexthop_entry; + struct hlist_node *node, *node_tmp; + + hlist_for_each_entry_safe(nexthop_entry, node, node_tmp, + mcast_nexthop_list, list) { + if (get_remaining_timeout(nexthop_entry, bat_priv)) + continue; + + hlist_del(&nexthop_entry->list); + kfree(nexthop_entry); + *num_nexthops = *num_nexthops - 1; + } +} + +static void purge_mcast_if_list(struct hlist_head *mcast_if_list, + struct bat_priv *bat_priv) +{ + struct mcast_forw_if_entry *if_entry; + struct hlist_node *node, *node_tmp; + + hlist_for_each_entry_safe(if_entry, node, node_tmp, mcast_if_list, + list) { + purge_mcast_nexthop_list(&if_entry->mcast_nexthop_list, + &if_entry->num_nexthops, + bat_priv); + + if (!hlist_empty(&if_entry->mcast_nexthop_list)) + continue; + + hlist_del(&if_entry->list); + kfree(if_entry); + } +} + +static void purge_mcast_orig_list(struct hlist_head *mcast_orig_list, + struct bat_priv *bat_priv) +{ + struct mcast_forw_orig_entry *orig_entry; + struct hlist_node *node, *node_tmp; + + hlist_for_each_entry_safe(orig_entry, node, node_tmp, mcast_orig_list, + list) { + purge_mcast_if_list(&orig_entry->mcast_if_list, bat_priv); + + if (!hlist_empty(&orig_entry->mcast_if_list)) + continue; + + hlist_del(&orig_entry->list); + kfree(orig_entry); + } +} + +void purge_mcast_forw_table(struct bat_priv *bat_priv) +{ + struct mcast_forw_table_entry *table_entry; + struct hlist_node *node, *node_tmp; + + spin_lock_bh(&bat_priv->mcast_forw_table_lock); + hlist_for_each_entry_safe(table_entry, node, node_tmp, + &bat_priv->mcast_forw_table, list) { + purge_mcast_orig_list(&table_entry->mcast_orig_list, bat_priv); + + if (!hlist_empty(&table_entry->mcast_orig_list)) + continue; + + hlist_del(&table_entry->list); + kfree(table_entry); + } + spin_unlock_bh(&bat_priv->mcast_forw_table_lock); +} + static void mcast_tracker_timer(struct 
work_struct *work) { struct bat_priv *bat_priv = container_of(work, struct bat_priv, diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h index 63d0e97..40f9da0 100644 --- a/batman-adv/multicast.h +++ b/batman-adv/multicast.h @@ -29,6 +29,7 @@ int mcast_tracker_timeout_set(struct net_device *net_dev, char *buff, void mcast_tracker_reset(struct bat_priv *bat_priv); void route_mcast_tracker_packet(struct sk_buff *tracker_packet, struct bat_priv *bat_priv); +void purge_mcast_forw_table(struct bat_priv *bat_priv); int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset); int mcast_init(struct bat_priv *bat_priv); void mcast_free(struct bat_priv *bat_priv); diff --git a/batman-adv/originator.c b/batman-adv/originator.c index 637d5f1..81b4865 100644 --- a/batman-adv/originator.c +++ b/batman-adv/originator.c @@ -30,6 +30,7 @@ #include "hard-interface.h" #include "unicast.h" #include "soft-interface.h" +#include "multicast.h"
static void purge_orig(struct work_struct *work);
@@ -403,6 +404,7 @@ static void purge_orig(struct work_struct *work) struct bat_priv *bat_priv = container_of(delayed_work, struct bat_priv, orig_work);
+ purge_mcast_forw_table(bat_priv); _purge_orig(bat_priv); start_purge_timer(bat_priv); }
This patch adds the capability to encapsulate and send a node's own multicast data packets. Based on the previously established multicast forwarding table, the sender can decide whether it actually has to send the multicast data to one or more of its interfaces or not.
Furthermore, the sending procedure also decides whether to broadcast or unicast a multicast data packet to its next-hops, depending on the configured mcast_fanout (default: with fewer than 3 next hops on an interface, send separate unicast packets).
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- multicast.c | 165 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ multicast.h | 1 + packet.h | 9 +++ soft-interface.c | 27 ++++++++- types.h | 1 + 5 files changed, 199 insertions(+), 4 deletions(-)
diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index fefef41..685d3f3 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -23,6 +23,7 @@ #include "multicast.h" #include "hash.h" #include "send.h" +#include "soft-interface.h" #include "hard-interface.h" #include "originator.h" #include "compat.h" @@ -1139,6 +1140,170 @@ int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset) return 0; }
+static inline void nexthops_from_if_list(struct hlist_head *mcast_if_list, + struct list_head *nexthop_list, + struct bat_priv *bat_priv) +{ + struct batman_if *batman_if; + struct mcast_forw_if_entry *if_entry; + struct mcast_forw_nexthop_entry *nexthop_entry; + struct hlist_node *node, *node2; + struct dest_entries_list *dest_entry; + int mcast_fanout = atomic_read(&bat_priv->mcast_fanout); + + hlist_for_each_entry(if_entry, node, mcast_if_list, list) { + rcu_read_lock(); + batman_if = if_num_to_batman_if(if_entry->if_num); + if (!batman_if) { + rcu_read_unlock(); + continue; + } + + kref_get(&batman_if->refcount); + rcu_read_unlock(); + + + /* send via broadcast */ + if (if_entry->num_nexthops > mcast_fanout) { + dest_entry = kmalloc(sizeof(struct dest_entries_list), + GFP_ATOMIC); + memcpy(dest_entry->dest, broadcast_addr, ETH_ALEN); + dest_entry->batman_if = batman_if; + list_add(&dest_entry->list, nexthop_list); + continue; + } + + /* send separate unicast packets */ + hlist_for_each_entry(nexthop_entry, node2, + &if_entry->mcast_nexthop_list, list) { + if (!get_remaining_timeout(nexthop_entry, bat_priv)) + continue; + + dest_entry = kmalloc(sizeof(struct dest_entries_list), + GFP_ATOMIC); + memcpy(dest_entry->dest, nexthop_entry->neigh_addr, + ETH_ALEN); + + kref_get(&batman_if->refcount); + dest_entry->batman_if = batman_if; + list_add(&dest_entry->list, nexthop_list); + } + kref_put(&batman_if->refcount, hardif_free_ref); + } +} + +static inline void nexthops_from_orig_list(uint8_t *orig, + struct hlist_head *mcast_orig_list, + struct list_head *nexthop_list, + struct bat_priv *bat_priv) +{ + struct mcast_forw_orig_entry *orig_entry; + struct hlist_node *node; + + hlist_for_each_entry(orig_entry, node, mcast_orig_list, list) { + if (memcmp(orig, orig_entry->orig, ETH_ALEN)) + continue; + + nexthops_from_if_list(&orig_entry->mcast_if_list, nexthop_list, + bat_priv); + break; + } +} + +static inline void nexthops_from_table(uint8_t *dest, uint8_t *orig, + 
struct hlist_head *mcast_forw_table, + struct list_head *nexthop_list, + struct bat_priv *bat_priv) +{ + struct mcast_forw_table_entry *table_entry; + struct hlist_node *node; + + hlist_for_each_entry(table_entry, node, mcast_forw_table, list) { + if (memcmp(dest, table_entry->mcast_addr, ETH_ALEN)) + continue; + + nexthops_from_orig_list(orig, &table_entry->mcast_orig_list, + nexthop_list, bat_priv); + break; + } +} + +static void route_mcast_packet(struct sk_buff *skb, struct bat_priv *bat_priv) +{ + struct sk_buff *skb1; + struct mcast_packet *mcast_packet; + struct ethhdr *ethhdr; + int num_bcasts = 3, i; + struct list_head nexthop_list; + struct dest_entries_list *dest_entry, *tmp; + + mcast_packet = (struct mcast_packet *)skb->data; + ethhdr = (struct ethhdr *)(mcast_packet + 1); + + INIT_LIST_HEAD(&nexthop_list); + + mcast_packet->ttl--; + + spin_lock_bh(&bat_priv->mcast_forw_table_lock); + nexthops_from_table(ethhdr->h_dest, mcast_packet->orig, + &bat_priv->mcast_forw_table, &nexthop_list, + bat_priv); + spin_unlock_bh(&bat_priv->mcast_forw_table_lock); + + list_for_each_entry_safe(dest_entry, tmp, &nexthop_list, list) { + if (is_broadcast_ether_addr(dest_entry->dest)) { + for (i = 0; i < num_bcasts; i++) { + skb1 = skb_clone(skb, GFP_ATOMIC); + send_skb_packet(skb1, dest_entry->batman_if, + dest_entry->dest); + } + } else { + skb1 = skb_clone(skb, GFP_ATOMIC); + send_skb_packet(skb1, dest_entry->batman_if, + dest_entry->dest); + } + kref_put(&dest_entry->batman_if->refcount, hardif_free_ref); + list_del(&dest_entry->list); + kfree(dest_entry); + } +} + +int mcast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) +{ + struct mcast_packet *mcast_packet; + + if (!bat_priv->primary_if) + goto dropped; + + if (my_skb_head_push(skb, sizeof(struct mcast_packet)) < 0) + goto dropped; + + mcast_packet = (struct mcast_packet *)skb->data; + mcast_packet->version = COMPAT_VERSION; + mcast_packet->ttl = TTL; + + /* batman packet type: broadcast */ + 
mcast_packet->packet_type = BAT_MCAST; + + /* hw address of first interface is the orig mac because only + * this mac is known throughout the mesh */ + memcpy(mcast_packet->orig, + bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); + + /* set broadcast sequence number */ + mcast_packet->seqno = + htonl(atomic_inc_return(&bat_priv->mcast_seqno)); + + route_mcast_packet(skb, bat_priv); + + kfree_skb(skb); + return 0; + +dropped: + kfree_skb(skb); + return 1; +} + int mcast_init(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->mcast_tracker_work, mcast_tracker_timer); diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h index 40f9da0..2fe9910 100644 --- a/batman-adv/multicast.h +++ b/batman-adv/multicast.h @@ -31,6 +31,7 @@ void route_mcast_tracker_packet(struct sk_buff *tracker_packet, struct bat_priv *bat_priv); void purge_mcast_forw_table(struct bat_priv *bat_priv); int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset); +int mcast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); int mcast_init(struct bat_priv *bat_priv); void mcast_free(struct bat_priv *bat_priv);
diff --git a/batman-adv/packet.h b/batman-adv/packet.h index 6f42a2f..daa5d54 100644 --- a/batman-adv/packet.h +++ b/batman-adv/packet.h @@ -31,6 +31,7 @@ #define BAT_VIS 0x05 #define BAT_UNICAST_FRAG 0x06 #define BAT_MCAST_TRACKER 0x07 +#define BAT_MCAST 0x08
/* this file is included by batctl which needs these defines */ #define COMPAT_VERSION 14 @@ -126,6 +127,14 @@ struct bcast_packet { uint32_t seqno; } __packed;
+struct mcast_packet { + uint8_t packet_type; /* BAT_MCAST */ + uint8_t version; /* batman version field */ + uint8_t orig[6]; + uint32_t seqno; + uint8_t ttl; +} __packed; + /* marks the path for multicast streams */ struct mcast_tracker_packet { uint8_t packet_type; /* BAT_MCAST_TRACKER */ diff --git a/batman-adv/soft-interface.c b/batman-adv/soft-interface.c index f25fe9d..3e16522 100644 --- a/batman-adv/soft-interface.c +++ b/batman-adv/soft-interface.c @@ -38,6 +38,7 @@ #include <linux/if_vlan.h> #include "unicast.h" #include "routing.h" +#include "multicast.h"
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); @@ -347,7 +348,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct vlan_ethhdr *vhdr; int data_len = skb->len, ret; short vid = -1; - bool do_bcast = false; + bool bcast_dst = false, mcast_dst = false;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto dropped; @@ -384,12 +385,22 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) if (ret < 0) goto dropped;
- if (ret == 0) - do_bcast = true; + /* dhcp request, which should be sent to the gateway + * directly? */ + if (ret) + goto unicast; + + if (is_broadcast_ether_addr(ethhdr->h_dest)) + bcast_dst = true; + else if (atomic_read(&bat_priv->mcast_mode) == + MCAST_MODE_PROACT_TRACKING) + mcast_dst = true; + else + bcast_dst = true; }
/* ethernet packet should be broadcasted */ - if (do_bcast) { + if (bcast_dst) { if (!bat_priv->primary_if) goto dropped;
@@ -418,8 +429,15 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) * the original skb. */ kfree_skb(skb);
+ /* multicast data with path optimization */ + } else if (mcast_dst) { + ret = mcast_send_skb(skb, bat_priv); + if (ret != 0) + goto dropped_freed; + /* unicast packet */ } else { +unicast: ret = unicast_send_skb(skb, bat_priv); if (ret != 0) goto dropped_freed; @@ -608,6 +626,7 @@ struct net_device *softif_create(char *name)
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); atomic_set(&bat_priv->bcast_seqno, 1); + atomic_set(&bat_priv->mcast_seqno, 1); atomic_set(&bat_priv->hna_local_changed, 0);
bat_priv->primary_if = NULL; diff --git a/batman-adv/types.h b/batman-adv/types.h index 0acfd2e..c3dde7c 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -147,6 +147,7 @@ struct bat_priv { atomic_t mcast_fanout; /* uint */ atomic_t log_level; /* uint */ atomic_t bcast_seqno; + atomic_t mcast_seqno; atomic_t bcast_queue_left; atomic_t batman_queue_left; char num_ifaces;
We need to check similar things for BAT_MCAST packets later too, therefore moving them to a separate function.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- routing.c | 43 ++++++++++++++++++++++++++----------------- 1 files changed, 26 insertions(+), 17 deletions(-)
diff --git a/batman-adv/routing.c b/batman-adv/routing.c index 60dcf39..e6953b9 100644 --- a/batman-adv/routing.c +++ b/batman-adv/routing.c @@ -1267,6 +1267,31 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) return 0; }
+static int check_broadcast_packet(struct sk_buff *skb, int hdr_size) +{ + struct ethhdr *ethhdr; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return -1; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with broadcast indication but unicast recipient */ + if (!is_broadcast_ether_addr(ethhdr->h_dest)) + return -1; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + return -1; + + /* ignore broadcasts sent by myself */ + if (is_my_mac(ethhdr->h_source)) + return -1; + + return 0; +} + int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, int hdr_size) { @@ -1414,27 +1439,11 @@ int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; struct bcast_packet *bcast_packet; - struct ethhdr *ethhdr; int hdr_size = sizeof(struct bcast_packet); int ret = NET_RX_DROP; int32_t seq_diff;
- /* drop packet if it has not necessary minimum size */ - if (unlikely(!pskb_may_pull(skb, hdr_size))) - goto out; - - ethhdr = (struct ethhdr *)skb_mac_header(skb); - - /* packet with broadcast indication but unicast recipient */ - if (!is_broadcast_ether_addr(ethhdr->h_dest)) - goto out; - - /* packet with broadcast sender address */ - if (is_broadcast_ether_addr(ethhdr->h_source)) - goto out; - - /* ignore broadcasts sent by myself */ - if (is_my_mac(ethhdr->h_source)) + if (check_broadcast_packet(skb, hdr_size) < 0) goto out;
bcast_packet = (struct bcast_packet *)skb->data;
This patch adds the forwarding of multicast data packets to the local soft interface if this receiving node is a member of the same multicast group as specified in the multicast packet.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- hard-interface.c | 5 +++++ routing.c | 29 +++++++++++++++++++++++++++++ routing.h | 1 + 3 files changed, 35 insertions(+), 0 deletions(-)
diff --git a/batman-adv/hard-interface.c b/batman-adv/hard-interface.c index 8fa8ff7..7e318f0 100644 --- a/batman-adv/hard-interface.c +++ b/batman-adv/hard-interface.c @@ -631,6 +631,11 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, ret = recv_bcast_packet(skb, batman_if); break;
+ /* multicast packet */ + case BAT_MCAST: + ret = recv_mcast_packet(skb, batman_if); + break; + /* multicast tracker packet */ case BAT_MCAST_TRACKER: ret = recv_mcast_tracker_packet(skb, batman_if); diff --git a/batman-adv/routing.c b/batman-adv/routing.c index e6953b9..067545c 100644 --- a/batman-adv/routing.c +++ b/batman-adv/routing.c @@ -1506,6 +1506,35 @@ out: return ret; }
+int recv_mcast_packet(struct sk_buff *skb, struct batman_if *recv_if) +{ + struct ethhdr *ethhdr; + struct netdev_hw_addr *mc_entry; + int ret = 1; + int hdr_size = sizeof(struct mcast_packet); + + /* multicast data packets might be received via unicast or broadcast */ + if (check_unicast_packet(skb, hdr_size) < 0 && + check_broadcast_packet(skb, hdr_size) < 0) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)(skb->data + sizeof(struct mcast_packet)); + + /* multicast for me? */ + netif_addr_lock_bh(recv_if->soft_iface); + netdev_for_each_mc_addr(mc_entry, recv_if->soft_iface) { + ret = memcmp(mc_entry->addr, ethhdr->h_dest, ETH_ALEN); + if (!ret) + break; + } + netif_addr_unlock_bh(recv_if->soft_iface); + + if (!ret) + interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); + + return NET_RX_SUCCESS; +} + int recv_mcast_tracker_packet(struct sk_buff *skb, struct batman_if *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); diff --git a/batman-adv/routing.h b/batman-adv/routing.h index 83f2752..7d23c3f 100644 --- a/batman-adv/routing.h +++ b/batman-adv/routing.h @@ -38,6 +38,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); +int recv_mcast_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_mcast_tracker_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
This patch enables the forwarding of multicast data and uses the same methods for deciding to forward via broad- or unicast(s) as the local packet encapsulation already did.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- multicast.c | 2 +- multicast.h | 1 + routing.c | 4 ++++ 3 files changed, 6 insertions(+), 1 deletions(-)
diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index 685d3f3..24bc85a 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -1228,7 +1228,7 @@ static inline void nexthops_from_table(uint8_t *dest, uint8_t *orig, } }
-static void route_mcast_packet(struct sk_buff *skb, struct bat_priv *bat_priv) +void route_mcast_packet(struct sk_buff *skb, struct bat_priv *bat_priv) { struct sk_buff *skb1; struct mcast_packet *mcast_packet; diff --git a/batman-adv/multicast.h b/batman-adv/multicast.h index 2fe9910..abd828e 100644 --- a/batman-adv/multicast.h +++ b/batman-adv/multicast.h @@ -31,6 +31,7 @@ void route_mcast_tracker_packet(struct sk_buff *tracker_packet, struct bat_priv *bat_priv); void purge_mcast_forw_table(struct bat_priv *bat_priv); int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset); +void route_mcast_packet(struct sk_buff *skb, struct bat_priv *bat_priv); int mcast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); int mcast_init(struct bat_priv *bat_priv); void mcast_free(struct bat_priv *bat_priv); diff --git a/batman-adv/routing.c b/batman-adv/routing.c index 067545c..05482d4 100644 --- a/batman-adv/routing.c +++ b/batman-adv/routing.c @@ -1508,6 +1508,7 @@ out:
int recv_mcast_packet(struct sk_buff *skb, struct batman_if *recv_if) { + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct ethhdr *ethhdr; struct netdev_hw_addr *mc_entry; int ret = 1; @@ -1518,6 +1519,9 @@ int recv_mcast_packet(struct sk_buff *skb, struct batman_if *recv_if) check_broadcast_packet(skb, hdr_size) < 0) return NET_RX_DROP;
+ /* forward multicast packet if necessary */ + route_mcast_packet(skb, bat_priv); + ethhdr = (struct ethhdr *)(skb->data + sizeof(struct mcast_packet));
/* multicast for me? */
This commit adds duplicate checks to avoid endless rebroadcasts in the case of forwarding multicast data packets via broadcasting.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- originator.c | 3 ++ routing.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--- types.h | 5 ++++ 3 files changed, 68 insertions(+), 4 deletions(-)
diff --git a/batman-adv/originator.c b/batman-adv/originator.c index 81b4865..977e75c 100644 --- a/batman-adv/originator.c +++ b/batman-adv/originator.c @@ -224,6 +224,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) INIT_LIST_HEAD(&orig_node->bond_list); spin_lock_init(&orig_node->ogm_cnt_lock); spin_lock_init(&orig_node->bcast_seqno_lock); + spin_lock_init(&orig_node->mcast_seqno_lock); spin_lock_init(&orig_node->neigh_list_lock); kref_init(&orig_node->refcount);
@@ -235,6 +236,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) orig_node->num_mca = 0; orig_node->bcast_seqno_reset = jiffies - 1 - msecs_to_jiffies(RESET_PROTECTION_MS); + orig_node->mcast_seqno_reset = jiffies - 1 + - msecs_to_jiffies(RESET_PROTECTION_MS); orig_node->batman_seqno_reset = jiffies - 1 - msecs_to_jiffies(RESET_PROTECTION_MS);
diff --git a/batman-adv/routing.c b/batman-adv/routing.c index 05482d4..64bff51 100644 --- a/batman-adv/routing.c +++ b/batman-adv/routing.c @@ -1509,20 +1509,65 @@ out: int recv_mcast_packet(struct sk_buff *skb, struct batman_if *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct orig_node *orig_node = NULL; + struct mcast_packet *mcast_packet; struct ethhdr *ethhdr; struct netdev_hw_addr *mc_entry; - int ret = 1; + int32_t seq_diff; + int ret = NET_RX_DROP; int hdr_size = sizeof(struct mcast_packet);
/* multicast data packets might be received via unicast or broadcast */ if (check_unicast_packet(skb, hdr_size) < 0 && check_broadcast_packet(skb, hdr_size) < 0) - return NET_RX_DROP; + goto out; + + mcast_packet = (struct mcast_packet *)skb->data; + + /* ignore broadcasts originated by myself */ + if (is_my_mac(mcast_packet->orig)) + goto out; + + if (mcast_packet->ttl < 2) + goto out; + + rcu_read_lock(); + orig_node = ((struct orig_node *) + hash_find(bat_priv->orig_hash, compare_orig, choose_orig, + mcast_packet->orig)); + + if (!orig_node) + goto rcu_unlock; + + kref_get(&orig_node->refcount); + rcu_read_unlock(); + + spin_lock_bh(&orig_node->mcast_seqno_lock); + + /* check whether the packet is a duplicate */ + if (get_bit_status(orig_node->mcast_bits, + orig_node->last_mcast_seqno, + ntohl(mcast_packet->seqno))) + goto spin_unlock; + + seq_diff = ntohl(mcast_packet->seqno) - orig_node->last_mcast_seqno; + + /* check whether the packet is old and the host just restarted. */ + if (window_protected(bat_priv, seq_diff, + &orig_node->mcast_seqno_reset)) + goto spin_unlock; + + /* mark broadcast in flood history, update window position + * if required. */ + if (bit_get_packet(bat_priv, orig_node->mcast_bits, seq_diff, 1)) + orig_node->last_mcast_seqno = ntohl(mcast_packet->seqno); + + spin_unlock_bh(&orig_node->mcast_seqno_lock);
/* forward multicast packet if necessary */ route_mcast_packet(skb, bat_priv);
- ethhdr = (struct ethhdr *)(skb->data + sizeof(struct mcast_packet)); + ethhdr = (struct ethhdr *)(mcast_packet + 1);
/* multicast for me? */ netif_addr_lock_bh(recv_if->soft_iface); @@ -1536,7 +1581,18 @@ int recv_mcast_packet(struct sk_buff *skb, struct batman_if *recv_if) if (!ret) interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
- return NET_RX_SUCCESS; + ret = NET_RX_SUCCESS; + goto out; + +rcu_unlock: + rcu_read_unlock(); + goto out; +spin_unlock: + spin_unlock_bh(&orig_node->mcast_seqno_lock); +out: + if (orig_node) + kref_put(&orig_node->refcount, orig_node_free_ref); + return ret; }
int recv_mcast_tracker_packet(struct sk_buff *skb, struct batman_if *recv_if) diff --git a/batman-adv/types.h b/batman-adv/types.h index c3dde7c..1af391e 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -72,6 +72,7 @@ struct orig_node { uint8_t *bcast_own_sum; unsigned long last_valid; unsigned long bcast_seqno_reset; + unsigned long mcast_seqno_reset; unsigned long batman_seqno_reset; uint8_t gw_flags; uint8_t flags; @@ -82,7 +83,9 @@ struct orig_node { uint32_t last_real_seqno; uint8_t last_ttl; unsigned long bcast_bits[NUM_WORDS]; + unsigned long mcast_bits[NUM_WORDS]; uint32_t last_bcast_seqno; + uint32_t last_mcast_seqno; struct hlist_head neigh_list; struct list_head frag_list; spinlock_t neigh_list_lock; /* protects neighbor list */ @@ -94,6 +97,8 @@ struct orig_node { * neigh_node->real_packet_count */ spinlock_t bcast_seqno_lock; /* protects bcast_bits, * last_bcast_seqno */ + spinlock_t mcast_seqno_lock; /* protects mcast_bits, + * last_mcast_seqno */ atomic_t bond_candidates; struct list_head bond_list; };
We may only optimize the multicast packet flow if an mcast_mode has been activated and if we are a multicast receiver of the same group. Otherwise we flood the multicast packet without optimizations.
This allows us to still flood multicast packets of protocols where it is not easily possible for a multicast sender to be a multicast receiver of the same group (for instance IPv6 NDP), instead of dropping them.
This commit therefore also makes IPv6 usable again, if the proact_tracking multicast mode has been activated.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- soft-interface.c | 28 ++++++++++++++++++++++++++-- 1 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/batman-adv/soft-interface.c b/batman-adv/soft-interface.c index 3e16522..c4b2f6f 100644 --- a/batman-adv/soft-interface.c +++ b/batman-adv/soft-interface.c @@ -340,6 +340,31 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu) return 0; }
+static int mcast_may_optimize(uint8_t *dest, struct net_device *soft_iface) +{ + struct netdev_hw_addr *mc_entry; + struct bat_priv *bat_priv = netdev_priv(soft_iface); + int mcast_mode = atomic_read(&bat_priv->mcast_mode); + + if (mcast_mode != MCAST_MODE_PROACT_TRACKING) + return 0; + + /* Still allow flooding of multicast packets of protocols where it is + * not easily possible for a multicast sender to be a multicast + * receiver of the same group (for instance IPv6 NDP) */ + netif_addr_lock_bh(soft_iface); + netdev_for_each_mc_addr(mc_entry, soft_iface) { + if (memcmp(dest, mc_entry->addr, ETH_ALEN)) + continue; + + netif_addr_unlock_bh(soft_iface); + return 1; + } + netif_addr_unlock_bh(soft_iface); + + return 0; +} + int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) { struct ethhdr *ethhdr = (struct ethhdr *)skb->data; @@ -392,8 +417,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
if (is_broadcast_ether_addr(ethhdr->h_dest)) bcast_dst = true; - else if (atomic_read(&bat_priv->mcast_mode) == - MCAST_MODE_PROACT_TRACKING) + else if (mcast_may_optimize(ethhdr->h_dest, soft_iface)) mcast_dst = true; else bcast_dst = true;
Depending on the scenario, people might want to adjust the number of (re)broadcasts of data packets - usually higher values in sparse or lower values in dense networks.
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- bat_sysfs.c | 2 ++ send.c | 3 ++- types.h | 1 + 3 files changed, 5 insertions(+), 1 deletions(-)
diff --git a/batman-adv/bat_sysfs.c b/batman-adv/bat_sysfs.c index f6e918f..406ab7c 100644 --- a/batman-adv/bat_sysfs.c +++ b/batman-adv/bat_sysfs.c @@ -520,6 +520,7 @@ static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, update_mcast_tracker); BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); +BAT_ATTR_UINT(num_bcasts, S_IRUGO | S_IWUSR, 0, INT_MAX, NULL); BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, post_gw_deselect); static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, @@ -544,6 +545,7 @@ static struct bat_attribute *mesh_attrs[] = { &bat_attr_gw_mode, &bat_attr_orig_interval, &bat_attr_hop_penalty, + &bat_attr_num_bcasts, &bat_attr_gw_sel_class, &bat_attr_gw_bandwidth, &bat_attr_mcast_mode, diff --git a/batman-adv/send.c b/batman-adv/send.c index 0d4a2a6..80dfa80 100644 --- a/batman-adv/send.c +++ b/batman-adv/send.c @@ -510,6 +510,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work) struct sk_buff *skb1; struct net_device *soft_iface = forw_packet->if_incoming->soft_iface; struct bat_priv *bat_priv = netdev_priv(soft_iface); + int num_bcasts = atomic_read(&bat_priv->num_bcasts);
spin_lock_bh(&bat_priv->forw_bcast_list_lock); hlist_del(&forw_packet->list); @@ -534,7 +535,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work) forw_packet->num_packets++;
/* if we still have some more bcasts to send */ - if (forw_packet->num_packets < 3) { + if (forw_packet->num_packets < num_bcasts) { _add_bcast_packet_to_list(bat_priv, forw_packet, ((5 * HZ) / 1000)); return; diff --git a/batman-adv/types.h b/batman-adv/types.h index 1af391e..a27b442 100644 --- a/batman-adv/types.h +++ b/batman-adv/types.h @@ -146,6 +146,7 @@ struct bat_priv { atomic_t gw_bandwidth; /* gw bandwidth */ atomic_t orig_interval; /* uint */ atomic_t hop_penalty; /* uint */ + atomic_t num_bcasts; /* uint */ atomic_t mcast_mode; /* MCAST_MODE_* */ atomic_t mcast_tracker_interval;/* uint, auto */ atomic_t mcast_tracker_timeout; /* uint, auto */
Signed-off-by: Linus Lüssing linus.luessing@saxnet.de --- multicast.c | 102 +++++++++++++++++++++++++++++++++++++---------------------- 1 files changed, 64 insertions(+), 38 deletions(-)
diff --git a/batman-adv/multicast.c b/batman-adv/multicast.c index 24bc85a..f85d876 100644 --- a/batman-adv/multicast.c +++ b/batman-adv/multicast.c @@ -108,6 +108,7 @@ struct mcast_forw_nexthop_entry { struct hlist_node list; uint8_t neigh_addr[6]; unsigned long timeout; /* old jiffies value */ + struct rcu_head rcu; };
struct mcast_forw_if_entry { @@ -115,6 +116,7 @@ struct mcast_forw_if_entry { int16_t if_num; int num_nexthops; struct hlist_head mcast_nexthop_list; + struct rcu_head rcu; };
struct mcast_forw_orig_entry { @@ -123,12 +125,14 @@ struct mcast_forw_orig_entry { uint32_t last_mcast_seqno; unsigned long mcast_bits[NUM_WORDS]; struct hlist_head mcast_if_list; + struct rcu_head rcu; };
struct mcast_forw_table_entry { struct hlist_node list; uint8_t mcast_addr[6]; struct hlist_head mcast_orig_list; + struct rcu_head rcu; };
/* how long to wait until sending a multicast tracker packet */ @@ -861,6 +865,39 @@ free: } }
+static void nexthop_entry_free(struct rcu_head *rcu) +{ + struct mcast_forw_nexthop_entry *nexthop_entry; + + nexthop_entry = container_of(rcu, struct mcast_forw_nexthop_entry, + rcu); + kfree(nexthop_entry); +} + +static void if_entry_free(struct rcu_head *rcu) +{ + struct mcast_forw_if_entry *if_entry; + + if_entry = container_of(rcu, struct mcast_forw_if_entry, rcu); + kfree(if_entry); +} + +static void orig_entry_free(struct rcu_head *rcu) +{ + struct mcast_forw_orig_entry *orig_entry; + + orig_entry = container_of(rcu, struct mcast_forw_orig_entry, rcu); + kfree(orig_entry); +} + +static void table_entry_free(struct rcu_head *rcu) +{ + struct mcast_forw_table_entry *table_entry; + + table_entry = container_of(rcu, struct mcast_forw_table_entry, rcu); + kfree(table_entry); +} + static void purge_mcast_nexthop_list(struct hlist_head *mcast_nexthop_list, int *num_nexthops, struct bat_priv *bat_priv) @@ -873,8 +910,8 @@ static void purge_mcast_nexthop_list(struct hlist_head *mcast_nexthop_list, if (get_remaining_timeout(nexthop_entry, bat_priv)) continue;
- hlist_del(&nexthop_entry->list); - kfree(nexthop_entry); + hlist_del_rcu(&nexthop_entry->list); + call_rcu(&nexthop_entry->rcu, nexthop_entry_free); *num_nexthops = *num_nexthops - 1; } } @@ -894,8 +931,8 @@ static void purge_mcast_if_list(struct hlist_head *mcast_if_list, if (!hlist_empty(&if_entry->mcast_nexthop_list)) continue;
- hlist_del(&if_entry->list); - kfree(if_entry); + hlist_del_rcu(&if_entry->list); + call_rcu(&if_entry->rcu, if_entry_free); } }
@@ -912,8 +949,8 @@ static void purge_mcast_orig_list(struct hlist_head *mcast_orig_list, if (!hlist_empty(&orig_entry->mcast_if_list)) continue;
- hlist_del(&orig_entry->list); - kfree(orig_entry); + hlist_del_rcu(&orig_entry->list); + call_rcu(&orig_entry->rcu, orig_entry_free); } }
@@ -930,8 +967,8 @@ void purge_mcast_forw_table(struct bat_priv *bat_priv) if (!hlist_empty(&table_entry->mcast_orig_list)) continue;
- hlist_del(&table_entry->list); - kfree(table_entry); + hlist_del_rcu(&table_entry->list); + call_rcu(&table_entry->rcu, table_entry_free); } spin_unlock_bh(&bat_priv->mcast_forw_table_lock); } @@ -1073,18 +1110,14 @@ static void seq_print_if_entry(struct mcast_forw_if_entry *if_entry, struct hlist_node *node; struct batman_if *batman_if;
- rcu_read_lock(); batman_if = if_num_to_batman_if(if_entry->if_num); - if (!batman_if) { - rcu_read_unlock(); + if (!batman_if) return; - }
seq_printf(seq, "\t\t%s\n", batman_if->net_dev->name); - rcu_read_unlock();
- hlist_for_each_entry(nexthop_entry, node, - &if_entry->mcast_nexthop_list, list) + hlist_for_each_entry_rcu(nexthop_entry, node, + &if_entry->mcast_nexthop_list, list) seq_printf(seq, "\t\t\t%pM - %i\n", nexthop_entry->neigh_addr, get_remaining_timeout(nexthop_entry, bat_priv)); } @@ -1097,8 +1130,8 @@ static void seq_print_orig_entry(struct mcast_forw_orig_entry *orig_entry, struct hlist_node *node;
seq_printf(seq, "\t%pM\n", orig_entry->orig); - hlist_for_each_entry(if_entry, node, &orig_entry->mcast_if_list, - list) + hlist_for_each_entry_rcu(if_entry, node, &orig_entry->mcast_if_list, + list) seq_print_if_entry(if_entry, bat_priv, seq); }
@@ -1110,8 +1143,8 @@ static void seq_print_table_entry(struct mcast_forw_table_entry *table_entry, struct hlist_node *node;
seq_printf(seq, "%pM\n", table_entry->mcast_addr); - hlist_for_each_entry(orig_entry, node, &table_entry->mcast_orig_list, - list) + hlist_for_each_entry_rcu(orig_entry, node, + &table_entry->mcast_orig_list, list) seq_print_orig_entry(orig_entry, bat_priv, seq); }
@@ -1129,13 +1162,11 @@ int mcast_forw_table_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, "Multicast group MAC\tOriginator\t" "Outgoing interface\tNexthop - timeout in msecs\n");
- spin_lock_bh(&bat_priv->mcast_forw_table_lock); - - hlist_for_each_entry(table_entry, node, &bat_priv->mcast_forw_table, - list) + rcu_read_lock(); + hlist_for_each_entry_rcu(table_entry, node, + &bat_priv->mcast_forw_table, list) seq_print_table_entry(table_entry, bat_priv, seq); - - spin_unlock_bh(&bat_priv->mcast_forw_table_lock); + rcu_read_unlock();
return 0; } @@ -1151,17 +1182,12 @@ static inline void nexthops_from_if_list(struct hlist_head *mcast_if_list, struct dest_entries_list *dest_entry; int mcast_fanout = atomic_read(&bat_priv->mcast_fanout);
- hlist_for_each_entry(if_entry, node, mcast_if_list, list) { - rcu_read_lock(); + hlist_for_each_entry_rcu(if_entry, node, mcast_if_list, list) { batman_if = if_num_to_batman_if(if_entry->if_num); - if (!batman_if) { - rcu_read_unlock(); + if (!batman_if) continue; - }
kref_get(&batman_if->refcount); - rcu_read_unlock(); -
/* send via broadcast */ if (if_entry->num_nexthops > mcast_fanout) { @@ -1174,8 +1200,8 @@ static inline void nexthops_from_if_list(struct hlist_head *mcast_if_list, }
/* send separate unicast packets */ - hlist_for_each_entry(nexthop_entry, node2, - &if_entry->mcast_nexthop_list, list) { + hlist_for_each_entry_rcu(nexthop_entry, node2, + &if_entry->mcast_nexthop_list, list) { if (!get_remaining_timeout(nexthop_entry, bat_priv)) continue;
@@ -1200,7 +1226,7 @@ static inline void nexthops_from_orig_list(uint8_t *orig, struct mcast_forw_orig_entry *orig_entry; struct hlist_node *node;
- hlist_for_each_entry(orig_entry, node, mcast_orig_list, list) { + hlist_for_each_entry_rcu(orig_entry, node, mcast_orig_list, list) { if (memcmp(orig, orig_entry->orig, ETH_ALEN)) continue;
@@ -1218,7 +1244,7 @@ static inline void nexthops_from_table(uint8_t *dest, uint8_t *orig, struct mcast_forw_table_entry *table_entry; struct hlist_node *node;
- hlist_for_each_entry(table_entry, node, mcast_forw_table, list) { + hlist_for_each_entry_rcu(table_entry, node, mcast_forw_table, list) { if (memcmp(dest, table_entry->mcast_addr, ETH_ALEN)) continue;
@@ -1244,11 +1270,11 @@ void route_mcast_packet(struct sk_buff *skb, struct bat_priv *bat_priv)
mcast_packet->ttl--;
- spin_lock_bh(&bat_priv->mcast_forw_table_lock); + rcu_read_lock(); nexthops_from_table(ethhdr->h_dest, mcast_packet->orig, &bat_priv->mcast_forw_table, &nexthop_list, bat_priv); - spin_unlock_bh(&bat_priv->mcast_forw_table_lock); + rcu_read_unlock();
list_for_each_entry_safe(dest_entry, tmp, &nexthop_list, list) { if (is_broadcast_ether_addr(dest_entry->dest)) {
b.a.t.m.a.n@lists.open-mesh.org