On reception of a multicast tracker packet (whether locally generated or received over an interface), a node now memorizes its forwarding state as a tuple of multicast group, originator, and next hops (plus their corresponding outgoing interfaces).
The first two elements are needed to decide whether a node shall forward a multicast data packet it receives later. The next-hop and interface information is needed to quickly decide whether a multicast data packet shall be forwarded via unicast to each individual next hop or via broadcast.
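To illustrate the decision this table enables (the multicast data path itself is not part of this commit), a later handler could walk the memorized state roughly as follows. This is only a sketch: mcast_send_unicast(), mcast_send_broadcast() and the threshold of two next hops per interface are made up, and a real data path would also have to clone the skb per transmission.

/* Illustrative only, not part of this patch: forward a multicast data
 * packet using the memorized forwarding state. The send helpers and the
 * unicast/broadcast threshold are hypothetical. */
static void mcast_forw_packet_sketch(struct bat_priv *bat_priv,
                                     struct sk_buff *skb,
                                     uint8_t *mcast_addr, uint8_t *orig)
{
        struct mcast_forw_table_entry *table_entry;
        struct mcast_forw_orig_entry *orig_entry;
        struct mcast_forw_if_entry *if_entry;
        struct mcast_forw_nexthop_entry *nexthop_entry;
        unsigned long flags;

        spin_lock_irqsave(&bat_priv->mcast_forw_table_lock, flags);
        list_for_each_entry(table_entry, &bat_priv->mcast_forw_table, list) {
                /* multicast group and originator select the entry */
                if (memcmp(table_entry->mcast_addr, mcast_addr, ETH_ALEN))
                        continue;

                list_for_each_entry(orig_entry,
                                    &table_entry->mcast_orig_list, list) {
                        if (memcmp(orig_entry->orig, orig, ETH_ALEN))
                                continue;

                        list_for_each_entry(if_entry,
                                            &orig_entry->mcast_if_list, list) {
                                /* several next hops on one interface: one
                                 * broadcast is cheaper than many unicasts */
                                if (if_entry->num_nexthops > 2) {
                                        mcast_send_broadcast(skb,
                                                        if_entry->if_num);
                                        continue;
                                }

                                list_for_each_entry(nexthop_entry,
                                                &if_entry->mcast_nexthop_list,
                                                list)
                                        mcast_send_unicast(skb,
                                                if_entry->if_num,
                                                nexthop_entry->neigh_addr);
                        }
                }
        }
        spin_unlock_irqrestore(&bat_priv->mcast_forw_table_lock, flags);
}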
This commit does not yet purge multicast forwarding table entries after the configured tracker timeout.
Signed-off-by: Linus Lüssing <linus.luessing@saxnet.de>
---
 multicast.c |  278 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 types.h     |    2 +
 2 files changed, 277 insertions(+), 3 deletions(-)
diff --git a/multicast.c b/multicast.c
index 2b3d613..c7edbef 100644
--- a/multicast.c
+++ b/multicast.c
@@ -25,6 +25,10 @@
 #include "send.h"
 #include "compat.h"
+/* If auto mode for tracker timeout has been selected,
+ * how many times of tracker_interval to wait */
+#define TRACKER_TIMEOUT_AUTO_X 5
+
 #define tracker_packet_for_each_dest(mcast_entry, dest_entry, mcast_num, \
                                      dest_num, tracker_packet) \
        for (mcast_num = 0, mcast_entry = (struct mcast_entry *)(tracker_packet + 1), \
             dest_entry = (uint8_t *)(mcast_entry + 1); \
@@ -40,6 +44,34 @@ struct dest_entries_list {
        struct batman_if *batman_if;
 };
+
+struct mcast_forw_nexthop_entry {
+        struct list_head list;
+        uint8_t neigh_addr[6];
+        unsigned long timeout;  /* old jiffies value */
+};
+
+struct mcast_forw_if_entry {
+        struct list_head list;
+        int16_t if_num;
+        int num_nexthops;
+        struct list_head mcast_nexthop_list;
+};
+
+struct mcast_forw_orig_entry {
+        struct list_head list;
+        uint8_t orig[6];
+        uint32_t last_mcast_seqno;
+        TYPE_OF_WORD mcast_bits[NUM_WORDS];
+        struct list_head mcast_if_list;
+};
+
+struct mcast_forw_table_entry {
+        struct list_head list;
+        uint8_t mcast_addr[6];
+        struct list_head mcast_orig_list;
+};
+
 /* how long to wait until sending a multicast tracker packet */
 static int tracker_send_delay(struct bat_priv *bat_priv)
 {
@@ -74,6 +106,218 @@ void mcast_tracker_reset(struct bat_priv *bat_priv)
        start_mcast_tracker(bat_priv);
 }
+static void prepare_forw_if_entry(struct list_head *forw_if_list,
+                                  int16_t if_num, uint8_t *neigh_addr)
+{
+        struct mcast_forw_if_entry *forw_if_entry;
+        struct mcast_forw_nexthop_entry *forw_nexthop_entry;
+
+        list_for_each_entry(forw_if_entry, forw_if_list, list)
+                if (forw_if_entry->if_num == if_num)
+                        goto skip_create_if;
+
+        forw_if_entry = kmalloc(sizeof(struct mcast_forw_if_entry),
+                                GFP_ATOMIC);
+        if (!forw_if_entry)
+                return;
+
+        forw_if_entry->if_num = if_num;
+        forw_if_entry->num_nexthops = 0;
+        INIT_LIST_HEAD(&forw_if_entry->mcast_nexthop_list);
+        list_add(&forw_if_entry->list, forw_if_list);
+
+skip_create_if:
+        list_for_each_entry(forw_nexthop_entry,
+                            &forw_if_entry->mcast_nexthop_list, list) {
+                if (!memcmp(forw_nexthop_entry->neigh_addr,
+                            neigh_addr, ETH_ALEN))
+                        return;
+        }
+
+        forw_nexthop_entry = kmalloc(sizeof(struct mcast_forw_nexthop_entry),
+                                     GFP_ATOMIC);
+        if (!forw_nexthop_entry && forw_if_entry->num_nexthops)
+                return;
+        else if (!forw_nexthop_entry)
+                goto free;
+
+        memcpy(forw_nexthop_entry->neigh_addr, neigh_addr, ETH_ALEN);
+        forw_if_entry->num_nexthops++;
+        if (forw_if_entry->num_nexthops < 0) {
+                kfree(forw_nexthop_entry);
+                goto free;
+        }
+
+        list_add(&forw_nexthop_entry->list,
+                 &forw_if_entry->mcast_nexthop_list);
+        return;
+free:
+        list_del(&forw_if_entry->list);
+        kfree(forw_if_entry);
+}
+
+static struct list_head *prepare_forw_table_entry(
+                                struct mcast_forw_table_entry *forw_table,
+                                uint8_t *mcast_addr, uint8_t *orig)
+{
+        struct mcast_forw_table_entry *forw_table_entry;
+        struct mcast_forw_orig_entry *orig_entry;
+
+        forw_table_entry = kmalloc(sizeof(struct mcast_forw_table_entry),
+                                   GFP_ATOMIC);
+        if (!forw_table_entry)
+                return NULL;
+
+        memcpy(forw_table_entry->mcast_addr, mcast_addr, ETH_ALEN);
+        list_add(&forw_table_entry->list, &forw_table->list);
+
+        INIT_LIST_HEAD(&forw_table_entry->mcast_orig_list);
+        orig_entry = kmalloc(sizeof(struct mcast_forw_orig_entry), GFP_ATOMIC);
+        if (!orig_entry)
+                goto free;
+
+        memcpy(orig_entry->orig, orig, ETH_ALEN);
+        INIT_LIST_HEAD(&orig_entry->mcast_if_list);
+        list_add(&orig_entry->list, &forw_table_entry->mcast_orig_list);
+
+        return &orig_entry->mcast_if_list;
+
+free:
+        list_del(&forw_table_entry->list);
+        kfree(forw_table_entry);
+        return NULL;
+}
+
+static int sync_nexthop(struct mcast_forw_nexthop_entry *sync_nexthop_entry,
+                        struct list_head *nexthop_list)
+{
+        struct mcast_forw_nexthop_entry *nexthop_entry;
+        int synced = 0;
+
+        list_for_each_entry(nexthop_entry, nexthop_list, list) {
+                if (memcmp(sync_nexthop_entry->neigh_addr,
+                           nexthop_entry->neigh_addr, ETH_ALEN))
+                        continue;
+
+                nexthop_entry->timeout = jiffies;
+                list_del(&sync_nexthop_entry->list);
+                kfree(sync_nexthop_entry);
+
+                synced = 1;
+                break;
+        }
+
+        if (!synced) {
+                sync_nexthop_entry->timeout = jiffies;
+                list_move(&sync_nexthop_entry->list, nexthop_list);
+                return 1;
+        }
+
+        return 0;
+}
+
+static void sync_if(struct mcast_forw_if_entry *sync_if_entry,
+                    struct list_head *if_list)
+{
+        struct mcast_forw_if_entry *if_entry;
+        struct mcast_forw_nexthop_entry *sync_nexthop_entry, *tmp;
+        int synced = 0;
+
+        list_for_each_entry(if_entry, if_list, list) {
+                if (sync_if_entry->if_num != if_entry->if_num)
+                        continue;
+
+                list_for_each_entry_safe(sync_nexthop_entry, tmp,
+                                &sync_if_entry->mcast_nexthop_list, list)
+                        if (sync_nexthop(sync_nexthop_entry,
+                                         &if_entry->mcast_nexthop_list))
+                                if_entry->num_nexthops++;
+
+                list_del(&sync_if_entry->list);
+                kfree(sync_if_entry);
+
+                synced = 1;
+                break;
+        }
+
+        if (!synced)
+                list_move(&sync_if_entry->list, if_list);
+}
+
+/* syncs all interface entries of sync_orig_entry to orig_list */
+static void sync_orig(struct mcast_forw_orig_entry *sync_orig_entry,
+                      struct list_head *orig_list)
+{
+        struct mcast_forw_orig_entry *orig_entry;
+        struct mcast_forw_if_entry *sync_if_entry, *tmp;
+        int synced = 0;
+
+        list_for_each_entry(orig_entry, orig_list, list) {
+                if (memcmp(sync_orig_entry->orig,
+                           orig_entry->orig, ETH_ALEN))
+                        continue;
+
+                list_for_each_entry_safe(sync_if_entry, tmp,
+                                &sync_orig_entry->mcast_if_list, list)
+                        sync_if(sync_if_entry, &orig_entry->mcast_if_list);
+
+                list_del(&sync_orig_entry->list);
+                kfree(sync_orig_entry);
+
+                synced = 1;
+                break;
+        }
+
+        if (!synced)
+                list_move(&sync_orig_entry->list, orig_list);
+}
+
+
+/* syncs all multicast entries of sync_table_entry to forw_table */
+static void sync_table(struct mcast_forw_table_entry *sync_table_entry,
+                       struct list_head *forw_table)
+{
+        struct mcast_forw_table_entry *table_entry;
+        struct mcast_forw_orig_entry *sync_orig_entry, *tmp;
+        int synced = 0;
+
+        list_for_each_entry(table_entry, forw_table, list) {
+                if (memcmp(sync_table_entry->mcast_addr,
+                           table_entry->mcast_addr, ETH_ALEN))
+                        continue;
+
+                list_for_each_entry_safe(sync_orig_entry, tmp,
+                                &sync_table_entry->mcast_orig_list, list)
+                        sync_orig(sync_orig_entry,
+                                  &table_entry->mcast_orig_list);
+
+                list_del(&sync_table_entry->list);
+                kfree(sync_table_entry);
+
+                synced = 1;
+                break;
+        }
+
+        if (!synced)
+                list_move(&sync_table_entry->list, forw_table);
+}
+
+/* Updates the old multicast forwarding table with the information gained
+ * from the generated/received tracker packet. It also frees the generated
+ * table for syncing (*forw_table). */
+static void update_mcast_forw_table(struct mcast_forw_table_entry *forw_table,
+                                    struct bat_priv *bat_priv)
+{
+        struct mcast_forw_table_entry *sync_table_entry, *tmp;
+        unsigned long flags;
+
+        spin_lock_irqsave(&bat_priv->mcast_forw_table_lock, flags);
+        list_for_each_entry_safe(sync_table_entry, tmp, &forw_table->list,
+                                 list)
+                sync_table(sync_table_entry, &bat_priv->mcast_forw_table);
+        spin_unlock_irqrestore(&bat_priv->mcast_forw_table_lock, flags);
+}
+
 static inline int find_mca_match(struct orig_node *orig_node,
        int mca_pos, uint8_t *mc_addr_list, int num_mcast_entries)
 {
@@ -246,13 +490,16 @@ out:
  * interface to the forw_if_list - but only if this router has not been
  * added yet */
 static int add_router_of_dest(struct dest_entries_list *next_hops,
-                              uint8_t *dest, struct bat_priv *bat_priv)
+                              uint8_t *dest,
+                              struct list_head *forw_if_list,
+                              struct bat_priv *bat_priv)
 {
        struct dest_entries_list *next_hop_tmp, *next_hop_entry;
        unsigned long flags;
        struct element_t *bucket;
        struct orig_node *orig_node;
        HASHIT(hashit);
+        int16_t if_num;
 
        next_hop_entry = kmalloc(sizeof(struct dest_entries_list), GFP_ATOMIC);
        if (!next_hop_entry)
@@ -273,12 +520,17 @@ static int add_router_of_dest(struct dest_entries_list *next_hops,
                memcpy(next_hop_entry->dest, orig_node->router->addr,
                       ETH_ALEN);
                next_hop_entry->batman_if = orig_node->router->if_incoming;
+                if_num = next_hop_entry->batman_if->if_num;
                break;
        }
        spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
        if (!next_hop_entry->batman_if)
                goto free;
 
+        if (forw_if_list)
+                prepare_forw_if_entry(forw_if_list, if_num,
+                                      next_hop_entry->dest);
+
        list_for_each_entry(next_hop_tmp, &next_hops->list, list)
                if (!memcmp(next_hop_tmp->dest, next_hop_entry->dest,
                            ETH_ALEN))
@@ -300,14 +552,17 @@ free:
 static int tracker_next_hops(struct mcast_tracker_packet *tracker_packet,
                             int tracker_packet_len,
                             struct dest_entries_list *next_hops,
+                             struct mcast_forw_table_entry *forw_table,
                             struct bat_priv *bat_priv)
 {
        int num_next_hops = 0, mcast_num, dest_num, ret;
        struct mcast_entry *mcast_entry;
        uint8_t *dest_entry;
        uint8_t *tail = (uint8_t *)tracker_packet + tracker_packet_len;
+        struct list_head *forw_table_if = NULL;
 
        INIT_LIST_HEAD(&next_hops->list);
+        INIT_LIST_HEAD(&forw_table->list);
 
        tracker_packet_for_each_dest(mcast_entry, dest_entry, mcast_num,
                                     dest_num, tracker_packet) {
@@ -327,8 +582,15 @@ static int tracker_next_hops(struct mcast_tracker_packet *tracker_packet,
                        goto out;
                }
 
+                if (dest_num)
+                        goto skip;
+
+                forw_table_if = prepare_forw_table_entry(forw_table,
+                                                mcast_entry->mcast_addr,
+                                                tracker_packet->orig);
+skip:
                ret = add_router_of_dest(next_hops, dest_entry,
-                                         bat_priv);
+                                         forw_table_if, bat_priv);
                if (!ret)
                        num_next_hops++;
        }
@@ -336,6 +598,8 @@ out:
        return num_next_hops;
 }
 
+/* Zero destination entries not destined for the specified next hop in the
+ * tracker packet */
 static void zero_tracker_packet(struct mcast_tracker_packet *tracker_packet,
                                uint8_t *next_hop, struct bat_priv *bat_priv)
 {
@@ -380,6 +644,8 @@ static void zero_tracker_packet(struct mcast_tracker_packet *tracker_packet,
        spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
 }
 
+/* Remove zeroed destination entries and empty multicast entries in tracker
+ * packet */
 static int shrink_tracker_packet(struct mcast_tracker_packet *tracker_packet,
                                 int tracker_packet_len)
 {
@@ -466,15 +732,19 @@ void route_mcast_tracker_packet(
        struct mcast_tracker_packet *next_hop_tracker_packets,
                                    *next_hop_tracker_packet;
        struct dest_entries_list *next_hop;
+        struct mcast_forw_table_entry forw_table;
        struct sk_buff *skb;
        int num_next_hops, i;
        int *tracker_packet_lengths;
 
        rcu_read_lock();
        num_next_hops = tracker_next_hops(tracker_packet, tracker_packet_len,
-                                          &next_hops, bat_priv);
+                                          &next_hops, &forw_table, bat_priv);
        if (!num_next_hops)
                goto out;
+
+        update_mcast_forw_table(&forw_table, bat_priv);
+
        next_hop_tracker_packets = kmalloc(tracker_packet_len * num_next_hops,
                                           GFP_ATOMIC);
        if (!next_hop_tracker_packets)
@@ -657,6 +927,8 @@ ok:
 int mcast_init(struct bat_priv *bat_priv)
 {
        INIT_DELAYED_WORK(&bat_priv->mcast_tracker_work, mcast_tracker_timer);
+        INIT_LIST_HEAD(&bat_priv->mcast_forw_table);
+
        start_mcast_tracker(bat_priv);
 
        return 1;
diff --git a/types.h b/types.h
index 0129b1f..17ccd5a 100644
--- a/types.h
+++ b/types.h
@@ -153,6 +153,7 @@ struct bat_priv {
        struct hlist_head forw_bcast_list;
        struct hlist_head gw_list;
        struct list_head vis_send_list;
+        struct list_head mcast_forw_table;
        struct hashtable_t *orig_hash;
        struct hashtable_t *hna_local_hash;
        struct hashtable_t *hna_global_hash;
@@ -166,6 +167,7 @@ struct bat_priv {
        spinlock_t vis_hash_lock; /* protects vis_hash */
        spinlock_t vis_list_lock; /* protects vis_info::recv_list */
        spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
+        spinlock_t mcast_forw_table_lock; /* protects mcast_forw_table */
        int16_t num_local_hna;
        atomic_t hna_local_changed;
        struct delayed_work hna_work;
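
Note on the purging left open in the commit message: a follow-up could drop next hops whose tracker state has timed out roughly as below. This is only a sketch: mcast_tracker_timeout_get() is assumed to return the configured tracker timeout in jiffies (i.e. TRACKER_TIMEOUT_AUTO_X times the tracker interval in auto mode), and freeing emptied parent entries is omitted.

/* Illustrative only, not part of this patch: purge expired next hops.
 * mcast_tracker_timeout_get() is a hypothetical helper; emptied if/orig/
 * table parent entries would need to be freed as well. */
static void purge_mcast_forw_table(struct bat_priv *bat_priv)
{
        struct mcast_forw_table_entry *table_entry;
        struct mcast_forw_orig_entry *orig_entry;
        struct mcast_forw_if_entry *if_entry;
        struct mcast_forw_nexthop_entry *nexthop_entry, *tmp;
        unsigned long flags;
        unsigned long timeout = mcast_tracker_timeout_get(bat_priv);

        spin_lock_irqsave(&bat_priv->mcast_forw_table_lock, flags);
        list_for_each_entry(table_entry, &bat_priv->mcast_forw_table, list)
            list_for_each_entry(orig_entry, &table_entry->mcast_orig_list,
                                list)
                list_for_each_entry(if_entry, &orig_entry->mcast_if_list,
                                    list)
                        list_for_each_entry_safe(nexthop_entry, tmp,
                                        &if_entry->mcast_nexthop_list, list) {
                                /* still within the tracker timeout? */
                                if (time_before(jiffies,
                                                nexthop_entry->timeout +
                                                timeout))
                                        continue;

                                /* tracker state expired: drop this next hop */
                                list_del(&nexthop_entry->list);
                                kfree(nexthop_entry);
                                if_entry->num_nexthops--;
                        }
        spin_unlock_irqrestore(&bat_priv->mcast_forw_table_lock, flags);
}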