So far, when purging the broadcast and OGM queues, we temporarily give up
the spin lock of these queues to be able to cancel any scheduled forwarding
work. However, this is unsafe and can lead to a general protection fault in
batadv_purge_outstanding_packets().
With this patch the queue purging is split into two steps: First, forward
packets are removed from those queues and the cancellation is signalled.
Second, any scheduled forwarding work is actively cancelled, we wait for any
still running forwarding to finish and only free a forw_packet afterwards.
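The resulting pattern, sketched in simplified form (the hlist and workqueue
helpers are the real kernel APIs, the surrounding code is condensed from
this patch):

    /* purge, step 1: under the queue lock, unhash the packet and park it
     * on a private "canceled" list
     */
    spin_lock_bh(&queue_lock);
    hlist_del_init(&forw_packet->list);
    hlist_add_head(&forw_packet->canceled_list, &canceled);
    spin_unlock_bh(&queue_lock);

    /* purge, step 2: outside the lock, cancel or flush the delayed work,
     * then it is safe to free the packet
     */
    cancel_delayed_work_sync(&forw_packet->delayed_work);
    batadv_forw_packet_free(forw_packet);

    /* worker: a packet unhashed in step 1 is being canceled - leave it alone */
    spin_lock_bh(&queue_lock);
    if (hlist_unhashed(&forw_packet->list)) {
            spin_unlock_bh(&queue_lock);
            return;
    }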
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
Fixes issue #168
send.c | 117 ++++++++++++++++++++++++++++++++++++++-------------------------
types.h | 1 +
2 files changed, 71 insertions(+), 47 deletions(-)
diff --git a/send.c b/send.c
index 0a0bb45..f93476b 100644
--- a/send.c
+++ b/send.c
@@ -245,6 +245,10 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
bat_priv = netdev_priv(soft_iface);
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+ if (hlist_unhashed(&forw_packet->list)) {
+ spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+ return;
+ }
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -293,6 +297,10 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
delayed_work);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ if (hlist_unhashed(&forw_packet->list)) {
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+ return;
+ }
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
@@ -316,13 +324,68 @@ out:
batadv_forw_packet_free(forw_packet);
}
+/**
+ * batadv_cancel_packets - Cancels a list of forward packets
+ * @forw_list: The forward packets to be canceled
+ * @canceled_list: The backup list.
+ *
+ * This cancels any scheduled forwarding packet tasks in the provided
+ * forw_list. The packets are moved from the forw_list to the
+ * canceled_list afterwards to unhash the forward packet list pointer,
+ * allowing any already running task to notice the cancellation.
+ */
+static void batadv_cancel_packets(struct hlist_head *forw_list,
+ struct hlist_head *canceled_list,
+ const struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_forw_packet *forw_packet;
+ struct hlist_node *tmp_node, *safe_tmp_node;
+
+ hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+ forw_list, list) {
+ /* if purge_outstanding_packets() was called with an argument
+ * we delete only packets belonging to the given interface
+ */
+ if ((hard_iface) &&
+ (forw_packet->if_incoming != hard_iface))
+ continue;
+
+ hlist_del_init(&forw_packet->list);
+ hlist_add_head(&forw_packet->canceled_list, canceled_list);
+ }
+}
+
+/**
+ * batadv_canceled_packets_free - Frees canceled forward packets
+ * @head: A list of forw_packets to be freed
+ *
+ * This function cancels the scheduling of any packet in the provided list,
+ * waits for any possibly running packet forwarding thread to finish and
+ * finally, safely frees the forward packet.
+ *
+ * This function might sleep.
+ */
+static void batadv_canceled_packets_free(struct hlist_head *head)
+{
+ struct batadv_forw_packet *forw_packet;
+ struct hlist_node *tmp_node, *safe_tmp_node;
+
+ hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, head,
+ canceled_list) {
+ cancel_delayed_work_sync(&forw_packet->delayed_work);
+
+ hlist_del(&forw_packet->canceled_list);
+ batadv_forw_packet_free(forw_packet);
+ }
+}
+
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface)
{
- struct batadv_forw_packet *forw_packet;
- struct hlist_node *tmp_node, *safe_tmp_node;
- bool pending;
+ struct hlist_head head;
+
+ INIT_HLIST_HEAD(&head);
if (hard_iface)
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -334,53 +397,13 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
/* free bcast list */
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
- hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &bat_priv->forw_bcast_list, list) {
- /* if purge_outstanding_packets() was called with an argument
- * we delete only packets belonging to the given interface
- */
- if ((hard_iface) &&
- (forw_packet->if_incoming != hard_iface))
- continue;
-
- spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
-
- /* batadv_send_outstanding_bcast_packet() will lock the list to
- * delete the item from the list
- */
- pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-
- if (pending) {
- hlist_del(&forw_packet->list);
- batadv_forw_packet_free(forw_packet);
- }
- }
+ batadv_cancel_packets(&bat_priv->forw_bcast_list, &head, hard_iface);
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
/* free batman packet list */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
- hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &bat_priv->forw_bat_list, list) {
- /* if purge_outstanding_packets() was called with an argument
- * we delete only packets belonging to the given interface
- */
- if ((hard_iface) &&
- (forw_packet->if_incoming != hard_iface))
- continue;
-
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
- /* send_outstanding_bat_packet() will lock the list to
- * delete the item from the list
- */
- pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_bh(&bat_priv->forw_bat_list_lock);
-
- if (pending) {
- hlist_del(&forw_packet->list);
- batadv_forw_packet_free(forw_packet);
- }
- }
+ batadv_cancel_packets(&bat_priv->forw_bat_list, &head, hard_iface);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ batadv_canceled_packets_free(&head);
}
diff --git a/types.h b/types.h
index aba8364..f62a35f 100644
--- a/types.h
+++ b/types.h
@@ -853,6 +853,7 @@ struct batadv_skb_cb {
*/
struct batadv_forw_packet {
struct hlist_node list;
+ struct hlist_node canceled_list;
unsigned long send_time;
uint8_t own;
struct sk_buff *skb;
--
1.7.10.4
When removing a single interface while a broadcast or OGM packet is
still pending, we free the forward packet without releasing its queue
slot again.
This patch fixes this issue.
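Conceptually, the queue slot accounting pairs up as sketched below (the
enqueue side is not part of this patch and is only assumed here for
illustration):

    /* enqueue (elsewhere, sketched): a forwarded packet reserves a slot */
    if (!atomic_add_unless(&bat_priv->bcast_queue_left, -1, 0))
            goto out;                       /* queue full, drop the packet */

    /* purge (this patch): a pending packet that is not our own still holds
     * its slot, so give the slot back before freeing the packet
     */
    if (!forw_packet->own)
            atomic_inc(&bat_priv->bcast_queue_left);
    batadv_forw_packet_free(forw_packet);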
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
send.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/send.c b/send.c
index ed7072a..2d539d6 100644
--- a/send.c
+++ b/send.c
@@ -356,6 +356,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
+ if (!forw_packet->own)
+ atomic_inc(&bat_priv->bcast_queue_left);
+
batadv_forw_packet_free(forw_packet);
}
}
@@ -382,6 +385,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
+ if (!forw_packet->own)
+ atomic_inc(&bat_priv->batman_queue_left);
+
batadv_forw_packet_free(forw_packet);
}
}
--
1.7.10.4
From: Mihail Costea <mihail.costea90@gmail.com>
Make DAT support more data types by turning its data field into a void *,
adding a type field to dat_entry and passing a data_type to the functions
that need it.
This change is needed in order to make DAT support any type of data, such
as IPv6 addresses, too.
Add a generic function for transforming DAT data into a string. This
function is used to avoid defining different debug messages for different
DAT data types: for example, with IPv6 data the "%pI4" format specifier
would become "%pI6c", while the rest of the debug message stays the same.
The per-type information is also stored in a table in order to avoid
further switch cases for all types.
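To illustrate how another type would plug in, a hypothetical IPv6 entry in
the per-type info table could look like this (BATADV_DAT_IPV6 is not part
of this patch and is shown purely as an assumption):

    static struct batadv_dat_type_info batadv_dat_types_info[] = {
            [BATADV_DAT_IPV4] = {
                    .size    = sizeof(__be32),
                    .str_fmt = "%pI4",
            },
            [BATADV_DAT_IPV6] = {           /* hypothetical follow-up type */
                    .size    = sizeof(struct in6_addr),
                    .str_fmt = "%pI6c",
            },
    };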
Signed-off-by: Mihail Costea <mihail.costea90@gmail.com>
Signed-off-by: Stefan Popa <Stefan.A.Popa@intel.com>
Reviewed-by: Stefan Popa <Stefan.A.Popa@intel.com>
---
distributed-arp-table.c | 197 +++++++++++++++++++++++++++++++++++------------
distributed-arp-table.h | 1 +
types.h | 24 +++++-
3 files changed, 169 insertions(+), 53 deletions(-)
diff --git a/distributed-arp-table.c b/distributed-arp-table.c
index f2543c2..90565d0 100644
--- a/distributed-arp-table.c
+++ b/distributed-arp-table.c
@@ -31,9 +31,32 @@
#include "types.h"
#include "translation-table.h"
+static struct batadv_dat_type_info batadv_dat_types_info[] = {
+ {
+ .size = sizeof(__be32),
+ .str_fmt = "%pI4",
+ },
+};
+
static void batadv_dat_purge(struct work_struct *work);
/**
+ * batadv_dat_data_to_str - transform DAT data into a string
+ * @data: the DAT data
+ * @type: type of data
+ * @buf: the buf where the data string is stored
+ * @buf_len: buf length
+ *
+ * Returns buf.
+ */
+static char *batadv_dat_data_to_str(void *data, uint8_t type,
+ char *buf, size_t buf_len)
+{
+ snprintf(buf, buf_len, batadv_dat_types_info[type].str_fmt, data);
+	return buf;
+}
+
+/**
* batadv_dat_start_timer - initialise the DAT periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
@@ -45,6 +68,19 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
}
/**
+ * batadv_dat_entry_free_ref_rcu - free a dat entry using its rcu
+ * @rcu: the dat entry rcu
+ */
+static void batadv_dat_entry_free_ref_rcu(struct rcu_head *rcu)
+{
+ struct batadv_dat_entry *dat_entry;
+
+ dat_entry = container_of(rcu, struct batadv_dat_entry, rcu);
+ kfree(dat_entry->data);
+ kfree(dat_entry);
+}
+
+/**
* batadv_dat_entry_free_ref - decrement the dat_entry refcounter and possibly
* free it
* @dat_entry: the entry to free
@@ -52,7 +88,7 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
{
if (atomic_dec_and_test(&dat_entry->refcount))
- kfree_rcu(dat_entry, rcu);
+ call_rcu(&dat_entry->rcu, batadv_dat_entry_free_ref_rcu);
}
/**
@@ -136,12 +172,21 @@ static void batadv_dat_purge(struct work_struct *work)
*
* Returns 1 if the two entries are the same, 0 otherwise.
*/
-static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
+static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
{
- const void *data1 = container_of(node, struct batadv_dat_entry,
- hash_entry);
+ struct batadv_dat_entry *dat_entry1 =
+ container_of(node, struct batadv_dat_entry,
+ hash_entry);
+ struct batadv_dat_entry *dat_entry2 =
+ container_of(data2,
+ struct batadv_dat_entry, data);
+ size_t data_size = batadv_dat_types_info[dat_entry1->type].size;
- return (memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0);
+ if (dat_entry1->type != dat_entry2->type)
+ return 0;
+
+ return (memcmp(dat_entry1->data, dat_entry2->data,
+ data_size) == 0 ? 1 : 0);
}
/**
@@ -198,8 +243,9 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
}
/**
- * batadv_hash_dat - compute the hash value for an IP address
+ * batadv_hash_dat - compute the hash value for DAT data
* @data: data to hash
+ * @data_type: type of data
* @size: size of the hash table
*
* Returns the selected index in the hash table for the given data.
@@ -209,7 +255,8 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
uint32_t hash = 0;
const struct batadv_dat_entry *dat = data;
- hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
+ hash = batadv_hash_bytes(hash, dat->data,
+ batadv_dat_types_info[dat->type].size);
hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
hash += (hash << 3);
@@ -223,32 +270,40 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
* batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
* table
* @bat_priv: the bat priv with all the soft interface information
- * @ip: search key
+ * @data: search key
+ * @data_type: type of data
* @vid: VLAN identifier
*
* Returns the dat_entry if found, NULL otherwise.
*/
static struct batadv_dat_entry *
-batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
- unsigned short vid)
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, void *data,
+ uint8_t data_type, unsigned short vid)
{
struct hlist_head *head;
struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
struct batadv_hashtable *hash = bat_priv->dat.hash;
- uint32_t index;
+ uint32_t index, data_size = batadv_dat_types_info[data_type].size;
if (!hash)
return NULL;
- to_find.ip = ip;
+ to_find.data = kmalloc(data_size, GFP_ATOMIC);
+ if (!to_find.data)
+ return NULL;
+ memcpy(to_find.data, data, data_size);
+ to_find.type = data_type;
to_find.vid = vid;
index = batadv_hash_dat(&to_find, hash->size);
head = &hash->table[index];
+ kfree(to_find.data);
rcu_read_lock();
hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
- if (dat_entry->ip != ip)
+ if (dat_entry->type != data_type)
+ continue;
+ if (memcmp(dat_entry->data, data, data_size))
continue;
if (!atomic_inc_not_zero(&dat_entry->refcount))
@@ -265,25 +320,30 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
/**
* batadv_dat_entry_add - add a new dat entry or update it if already exists
* @bat_priv: the bat priv with all the soft interface information
- * @ip: ipv4 to add/edit
- * @mac_addr: mac address to assign to the given ipv4
+ * @data: the data to add/edit
+ * @data_type: type of the data added to DAT
+ * @mac_addr: mac address to assign to the given data
* @vid: VLAN identifier
*/
-static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
- uint8_t *mac_addr, unsigned short vid)
+static void batadv_dat_entry_add(struct batadv_priv *bat_priv, void *data,
+ uint8_t data_type, uint8_t *mac_addr,
+ unsigned short vid)
{
struct batadv_dat_entry *dat_entry;
int hash_added;
+ char dbg_data[BATADV_DAT_DATA_MAX_LEN];
+ size_t data_size = batadv_dat_types_info[data_type].size;
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, data, data_type, vid);
/* if this entry is already known, just update it */
if (dat_entry) {
if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
dat_entry->last_update = jiffies;
- batadv_dbg(BATADV_DBG_DAT, bat_priv,
- "Entry updated: %pI4 %pM (vid: %u)\n",
- &dat_entry->ip, dat_entry->mac_addr, vid);
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "Entry updated: %s %pM (vid: %u)\n",
+ batadv_dat_data_to_str(dat_entry->data, data_type,
+ dbg_data, sizeof(dbg_data)),
+ dat_entry->mac_addr, vid);
goto out;
}
@@ -291,7 +351,12 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
if (!dat_entry)
goto out;
- dat_entry->ip = ip;
+ dat_entry->data = kmalloc(data_size, GFP_ATOMIC);
+ if (!dat_entry->data)
+ goto out;
+ memcpy(dat_entry->data, data, data_size);
+
+ dat_entry->type = data_type;
dat_entry->vid = vid;
memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
dat_entry->last_update = jiffies;
@@ -307,8 +372,10 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
goto out;
}
- batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %u)\n",
- &dat_entry->ip, dat_entry->mac_addr, vid);
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %s %pM (vid: %u)\n",
+ batadv_dat_data_to_str(dat_entry->data, data_type,
+ dbg_data, sizeof(dbg_data)),
+ dat_entry->mac_addr, vid);
out:
if (dat_entry)
@@ -520,7 +587,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* batadv_dat_select_candidates - select the nodes which the DHT message has to
* be sent to
* @bat_priv: the bat priv with all the soft interface information
- * @ip_dst: ipv4 to look up in the DHT
+ * @data: data to look up in the DHT
+ * @data_type: type of data
*
* An originator O is selected if and only if its DHT_ID value is one of three
* closest values (from the LEFT, with wrap around if needed) then the hash
@@ -529,11 +597,15 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
* Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
*/
static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, void *data,
+ uint8_t data_type)
{
int select;
- batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+ batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, data_key;
struct batadv_dat_candidate *res;
+ struct batadv_dat_entry to_find;
+ char dbg_data[BATADV_DAT_DATA_MAX_LEN];
+ size_t data_size = batadv_dat_types_info[data_type].size;
if (!bat_priv->orig_hash)
return NULL;
@@ -542,15 +614,23 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
if (!res)
return NULL;
- ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst,
- BATADV_DAT_ADDR_MAX);
+ to_find.data = kmalloc(data_size, GFP_ATOMIC);
+ if (!to_find.data)
+ return NULL;
+ memcpy(to_find.data, data, data_size);
+ to_find.type = data_type;
+ data_key = (batadv_dat_addr_t)batadv_hash_dat(&to_find,
+ BATADV_DAT_ADDR_MAX);
+ kfree(to_find.data);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
- "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst,
- ip_key);
+ "dat_select_candidates(): DATA=%s hash(DATA)=%u\n",
+ batadv_dat_data_to_str(data, data_type, dbg_data,
+ sizeof(dbg_data)),
+ data_key);
for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
- batadv_choose_next_candidate(bat_priv, res, select, ip_key,
+ batadv_choose_next_candidate(bat_priv, res, select, data_key,
&last_max);
return res;
@@ -560,7 +640,8 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
* batadv_dat_send_data - send a payload to the selected candidates
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
- * @ip: the DHT key
+ * @data: the DHT key
+ * @data_type: type of data
* @packet_subtype: unicast4addr packet subtype to use
*
* This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -570,8 +651,8 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
* otherwise.
*/
static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
- struct sk_buff *skb, __be32 ip,
- int packet_subtype)
+ struct sk_buff *skb, void *data,
+ uint8_t data_type, int packet_subtype)
{
int i;
bool ret = false;
@@ -579,12 +660,15 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node = NULL;
struct sk_buff *tmp_skb;
struct batadv_dat_candidate *cand;
+ char dbg_data[BATADV_DAT_DATA_MAX_LEN];
- cand = batadv_dat_select_candidates(bat_priv, ip);
+ cand = batadv_dat_select_candidates(bat_priv, data, data_type);
if (!cand)
goto out;
- batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %s\n",
+ batadv_dat_data_to_str(data, data_type, dbg_data,
+ sizeof(dbg_data)));
for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) {
if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND)
@@ -754,6 +838,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
unsigned long last_seen_jiffies;
int last_seen_msecs, last_seen_secs, last_seen_mins;
uint32_t i;
+ char dbg_data[BATADV_DAT_DATA_MAX_LEN];
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
@@ -774,8 +859,12 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
last_seen_msecs = last_seen_msecs % 60000;
last_seen_secs = last_seen_msecs / 1000;
- seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
- &dat_entry->ip, dat_entry->mac_addr,
+ seq_printf(seq, " * %15s %14pM %6i:%02i\n",
+ batadv_dat_data_to_str(dat_entry->data,
+ dat_entry->type,
+ dbg_data,
+ sizeof(dbg_data)),
+ dat_entry->mac_addr,
last_seen_mins, last_seen_secs);
}
rcu_read_unlock();
@@ -926,9 +1015,10 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
hw_src = batadv_arp_hw_src(skb, 0);
ip_dst = batadv_arp_ip_dst(skb, 0);
- batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, &ip_src, BATADV_DAT_IPV4, hw_src, vid);
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, &ip_dst,
+ BATADV_DAT_IPV4, vid);
if (dat_entry) {
/* If the ARP request is destined for a local client the local
* client will answer itself. DAT would only generate a
@@ -962,7 +1052,8 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
ret = true;
} else {
/* Send the request to the DHT */
- ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+ ret = batadv_dat_send_data(bat_priv, skb, &ip_dst,
+ BATADV_DAT_IPV4,
BATADV_P_DAT_DHT_GET);
}
out:
@@ -1008,9 +1099,10 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
batadv_dbg_arp(bat_priv, skb, type, hdr_size,
"Parsing incoming ARP REQUEST");
- batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, &ip_src, BATADV_DAT_IPV4, hw_src, vid);
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, &ip_dst,
+ BATADV_DAT_IPV4, vid);
if (!dat_entry)
goto out;
@@ -1074,14 +1166,16 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
hw_dst = batadv_arp_hw_dst(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
- batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
- batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+ batadv_dat_entry_add(bat_priv, &ip_src, BATADV_DAT_IPV4, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, &ip_dst, BATADV_DAT_IPV4, hw_dst, vid);
/* Send the ARP reply to the candidates for both the IP addresses that
* the node obtained from the ARP reply
*/
- batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
- batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, &ip_src, BATADV_DAT_IPV4,
+ BATADV_P_DAT_DHT_PUT);
+ batadv_dat_send_data(bat_priv, skb, &ip_dst, BATADV_DAT_IPV4,
+ BATADV_P_DAT_DHT_PUT);
}
/**
* batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
@@ -1119,8 +1213,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
/* Update our internal cache with both the IP addresses the node got
* within the ARP reply
*/
- batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
- batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+ batadv_dat_entry_add(bat_priv, &ip_src, BATADV_DAT_IPV4, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, &ip_dst, BATADV_DAT_IPV4, hw_dst, vid);
/* if this REPLY is directed to a client of mine, let's deliver the
* packet to the interface
@@ -1167,7 +1261,8 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
goto out;
ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
- dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, &ip_dst,
+ BATADV_DAT_IPV4, vid);
/* check if the node already got this entry */
if (!dat_entry) {
batadv_dbg(BATADV_DBG_DAT, bat_priv,
diff --git a/distributed-arp-table.h b/distributed-arp-table.h
index 60d853b..557bab9 100644
--- a/distributed-arp-table.h
+++ b/distributed-arp-table.h
@@ -28,6 +28,7 @@
#include <linux/if_arp.h>
#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
+#define BATADV_DAT_DATA_MAX_LEN 16
void batadv_dat_status_update(struct net_device *net_dev);
bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
diff --git a/types.h b/types.h
index 20a1bef..69c187e 100644
--- a/types.h
+++ b/types.h
@@ -931,7 +931,8 @@ struct batadv_algo_ops {
/**
* struct batadv_dat_entry - it is a single entry of batman-adv ARP backend. It
* is used to stored ARP entries needed for the global DAT cache
- * @ip: the IPv4 corresponding to this DAT/ARP entry
+ * @data: the data corresponding to this DAT entry
+ * @type: the type corresponding to this DAT entry
* @mac_addr: the MAC address associated to the stored IPv4
* @vid: the vlan ID associated to this entry
* @last_update: time in jiffies when this entry was refreshed last time
@@ -940,7 +941,8 @@ struct batadv_algo_ops {
* @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_dat_entry {
- __be32 ip;
+ void *data;
+ uint8_t type;
uint8_t mac_addr[ETH_ALEN];
unsigned short vid;
unsigned long last_update;
@@ -950,6 +952,24 @@ struct batadv_dat_entry {
};
/**
+ * enum batadv_dat_types - data types used in batadv_dat_entry
+ * @BATADV_DAT_IPV4: IPv4 address type
+ */
+enum batadv_dat_types {
+ BATADV_DAT_IPV4 = 0,
+};
+
+/**
+ * struct batadv_dat_type_info - info needed for a DAT data type
+ * @size: the size of the type data
+ * @str_fmt: string format used by the data
+ */
+struct batadv_dat_type_info {
+ size_t size;
+ char *str_fmt;
+};
+
+/**
* struct batadv_dat_candidate - candidate destination for DAT operations
* @type: the type of the selected candidate. It can one of the following:
* - BATADV_DAT_CANDIDATE_NOT_FOUND
--
1.7.10.4
This patch adds the multicast debug level, which can be used to check for
changes of a node's own multicast flags, for instance.
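With both the batctl and the kernel module side in place, the new level can
be selected and inspected like the existing ones, e.g. (output shortened):

    $ batctl loglevel mcast
    $ batctl loglevel
    [...]
    [x] messages related to multicast (mcast)
    $ batctl log | grep -i mcast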
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
Changes in v3:
* none
Changes in v2:
* added a sentence about 'mcast' log level to manpage
README | 1 +
man/batctl.8 | 6 +++---
sys.c | 5 +++++
3 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/README b/README
index b5fd259..c5e3575 100644
--- a/README
+++ b/README
@@ -389,6 +389,7 @@ $ batctl loglevel
[ ] messages related to bridge loop avoidance (bla)
[ ] messages related to arp snooping and distributed arp table (dat)
[ ] messages related to network coding (nc)
+[ ] messages related to multicast (mcast)
batctl nc_nodes
===============
diff --git a/man/batctl.8 b/man/batctl.8
index 110020e..66c28bb 100644
--- a/man/batctl.8
+++ b/man/batctl.8
@@ -98,9 +98,9 @@ level. Level 'none' disables all verbose logging. Level 'batman' enables message
Level 'routes' enables messages related to routes being added / changed / deleted. Level 'tt' enables messages related to
translation table operations. Level 'bla' enables messages related to the bridge loop avoidance. Level 'dat' enables
messages related to ARP snooping and the Distributed Arp Table. Level 'nc' enables messages related to network coding.
-Level 'all' enables all messages. The messages are sent to the batman-adv debug log. Use \fBbatctl log\fP to retrieve it.
-Make sure to have debugging output enabled when compiling the module otherwise the output as well as the loglevel options
-won't be available.
+Level 'mcast' enables messages related to multicast optimizations. Level 'all' enables all messages. The messages
+are sent to the batman-adv debug log. Use \fBbatctl log\fP to retrieve it. Make sure to have debugging output enabled
+when compiling the module otherwise the output as well as the loglevel options won't be available.
.br
.IP "\fBlog\fP|\fBl\fP [\fB\-n\fP]\fP"
batctl will read the batman-adv debug log which has to be compiled into the kernel module. If "\-n" is given batctl will not
diff --git a/sys.c b/sys.c
index 676bef1..4fa0e24 100644
--- a/sys.c
+++ b/sys.c
@@ -280,6 +280,7 @@ static void log_level_usage(void)
fprintf(stderr, " \t bla Messages related to bridge loop avoidance\n");
fprintf(stderr, " \t dat Messages related to arp snooping and distributed arp table\n");
fprintf(stderr, " \t nc Messages related to network coding\n");
+ fprintf(stderr, " \t mcast Messages related to multicast\n");
}
int handle_loglevel(char *mesh_iface, int argc, char **argv)
@@ -325,6 +326,8 @@ int handle_loglevel(char *mesh_iface, int argc, char **argv)
log_level |= BIT(4);
else if (strcmp(argv[i], "nc") == 0)
log_level |= BIT(5);
+ else if (strcmp(argv[i], "mcast") == 0)
+ log_level |= BIT(6);
else {
log_level_usage();
goto out;
@@ -359,6 +362,8 @@ int handle_loglevel(char *mesh_iface, int argc, char **argv)
"messages related to arp snooping and distributed arp table", "dat");
printf("[%c] %s (%s)\n", (log_level & BIT(5)) ? 'x' : ' ',
"messages related to network coding", "nc");
+ printf("[%c] %s (%s)\n", (log_level & BIT(6)) ? 'x' : ' ',
+ "messages related to multicast", "mcast");
out:
free(path_buff);
--
1.7.10.4
From: Simon Wunderlich <simon@open-mesh.com>
There are network setups where the current bridge loop avoidance can't
detect bridge loops. The minimal setup affected would consist of two
LANs and two separate meshes, connected in a ring like this:
A...(mesh1)...B
| |
(LAN1) (LAN2)
| |
C...(mesh2)...D
Since both the meshes and the backbones are separate, the bridge loop
avoidance does not have enough information to detect and avoid the loop
in this case. Even though these scenarios can't be fixed easily, this
kind of loop can at least be detected.
This patch implements a periodic check (running every 60 seconds for
now) which sends a broadcast frame with a random MAC address on
each backbone VLAN. If a broadcast frame with the same MAC address
is received on the mesh shortly afterwards, we know that there must be
a loop, report that incident and throw a uevent to let userspace handle
the problem.
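Stripped down, the mechanism added by this patch works roughly as follows
(simplified sketch, locking and error handling omitted):

    /* periodic worker: every BATADV_BLA_LOOPDETECT_PERIODS periods, pick a
     * new locally administered random MAC and send it on all backbone VLANs
     */
    random_ether_addr(bat_priv->bla.loopdetect_addr);
    bat_priv->bla.loopdetect_addr[0] = 0xba;
    bat_priv->bla.loopdetect_addr[1] = 0xbe;
    bat_priv->bla.loopdetect_lasttime = jiffies;
    /* ... batadv_bla_send_loopdetect() for every backbone gateway ... */

    /* rx path: seeing that MAC again as source address on the mesh within
     * BATADV_BLA_LOOPDETECT_TIMEOUT means the frame has looped back to us
     */
    if (batadv_compare_eth(ethhdr->h_source, bat_priv->bla.loopdetect_addr) &&
        !batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
                              BATADV_BLA_LOOPDETECT_TIMEOUT))
            queue_work(batadv_event_workqueue, &backbone_gw->report_work);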
Signed-off-by: Simon Wunderlich <simon@open-mesh.com>
---
bridge_loop_avoidance.c | 138 ++++++++++++++++++++++++++++++++++++++++++++++++
main.h | 4 ++
packet.h | 1 +
sysfs.c | 6 ++-
types.h | 8 +++
5 files changed, 155 insertions(+), 2 deletions(-)
diff --git a/bridge_loop_avoidance.c b/bridge_loop_avoidance.c
index 0f0ca43..db88b5f 100644
--- a/bridge_loop_avoidance.c
+++ b/bridge_loop_avoidance.c
@@ -22,6 +22,7 @@
#include "bridge_loop_avoidance.h"
#include "translation-table.h"
#include "send.h"
+#include "sysfs.h"
#include <linux/etherdevice.h>
#include <linux/crc16.h>
@@ -340,6 +341,14 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
ethhdr->h_source, ethhdr->h_dest,
BATADV_PRINT_VID(vid));
break;
+ case BATADV_CLAIM_TYPE_LOOPDETECT:
+ ether_addr_copy(ethhdr->h_source, mac);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
+ ethhdr->h_source, ethhdr->h_dest,
+ BATADV_PRINT_VID(vid));
+
+ break;
}
if (vid & BATADV_VLAN_HAS_TAG)
@@ -360,6 +369,36 @@ out:
}
/**
+ * batadv_bla_loopdetect_report - worker for reporting the loop
+ * @work: work queue item
+ *
+ * Throws an uevent, as the loopdetect check function can't do that itself
+ * since the kernel may sleep while throwing uevents.
+ */
+static void batadv_bla_loopdetect_report(struct work_struct *work)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_priv *bat_priv;
+ char vid_str[6] = { '\0' };
+
+ backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
+ report_work);
+ bat_priv = backbone_gw->bat_priv;
+
+ batadv_info(bat_priv->soft_iface,
+ "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
+ BATADV_PRINT_VID(backbone_gw->vid));
+ snprintf(vid_str, sizeof(vid_str), "%d",
+ BATADV_PRINT_VID(backbone_gw->vid));
+ vid_str[sizeof(vid_str) - 1] = 0;
+
+ batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
+ vid_str);
+
+ batadv_backbone_gw_free_ref(backbone_gw);
+}
+
+/**
* batadv_bla_get_backbone_gw
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address of the originator
@@ -397,6 +436,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
atomic_set(&entry->request_sent, 0);
atomic_set(&entry->wait_periods, 0);
ether_addr_copy(entry->orig, orig);
+ INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
/* one for the hash, one for returning */
atomic_set(&entry->refcount, 2);
@@ -943,6 +983,10 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
if (vlan_depth > 1)
return 1;
+ /* Let the loopdetect frames on the mesh in any case. */
+ if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
+ return 0;
+
/* check if it is a claim frame. */
ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
ethhdr);
@@ -1142,6 +1186,26 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
}
}
+/**
+ * batadv_bla_send_loopdetect - send a loopdetect frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: the backbone gateway for which a loop should be detected
+ *
+ * To detect loops that the bridge loop avoidance can't handle, send a loop
+ * detection packet on the backbone. Unlike other BLA frames, this frame will
+ * be allowed on the mesh by other nodes. If it is received on the mesh, this
+ * indicates that there is a loop.
+ */
+static void
+batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
+ struct batadv_bla_backbone_gw *backbone_gw)
+{
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
+ backbone_gw->vid);
+ batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
+ backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
+}
+
/* periodic work to do:
* * purge structures when they are too old
* * send announcements
@@ -1155,6 +1219,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hashtable *hash;
struct batadv_hard_iface *primary_if;
+ bool send_loopdetect = false;
int i;
delayed_work = container_of(work, struct delayed_work, work);
@@ -1170,6 +1235,22 @@ static void batadv_bla_periodic_work(struct work_struct *work)
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto out;
+ if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
+ /* set a new random mac address for the next bridge loop
+ * detection frames. Set the locally administered bit to avoid
+ * collisions with users' MAC addresses.
+ */
+ random_ether_addr(bat_priv->bla.loopdetect_addr);
+ bat_priv->bla.loopdetect_addr[0] = 0xba;
+ bat_priv->bla.loopdetect_addr[1] = 0xbe;
+ bat_priv->bla.loopdetect_lasttime = jiffies;
+ atomic_set(&bat_priv->bla.loopdetect_next,
+ BATADV_BLA_LOOPDETECT_PERIODS);
+
+ /* mark for sending loop detect on all VLANs */
+ send_loopdetect = true;
+ }
+
hash = bat_priv->bla.backbone_hash;
if (!hash)
goto out;
@@ -1186,6 +1267,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
backbone_gw->lasttime = jiffies;
batadv_bla_send_announce(bat_priv, backbone_gw);
+ if (send_loopdetect)
+ batadv_bla_send_loopdetect(bat_priv,
+ backbone_gw);
/* request_sent is only set after creation to avoid
* problems when we are not yet known as backbone gw
@@ -1254,6 +1338,9 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
bat_priv->bla.bcast_duplist_curr = 0;
+ atomic_set(&bat_priv->bla.loopdetect_next,
+ BATADV_BLA_LOOPDETECT_PERIODS);
+
if (bat_priv->bla.claim_hash)
return 0;
@@ -1449,6 +1536,55 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
}
/**
+ * batadv_bla_loopdetect_check - check and handle a detected loop
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to check
+ * @primary_if: interface where the request came on
+ * @vid: the VLAN ID of the frame
+ *
+ * Checks if this packet is a loop detect frame which has been sent by us;
+ * if so, a uevent is thrown and the event is logged.
+ *
+ * Returns true if it is a loop detect frame which is to be dropped, false
+ * otherwise.
+ */
+static bool
+batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_hard_iface *primary_if,
+ unsigned short vid)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct ethhdr *ethhdr;
+
+ ethhdr = eth_hdr(skb);
+
+ /* Only check for the MAC address and skip more checks here for
+ * performance reasons - this function is on the hotpath, after all.
+ */
+ if (!batadv_compare_eth(ethhdr->h_source,
+ bat_priv->bla.loopdetect_addr))
+ return false;
+
+ /* If the packet came too late, don't forward it on the mesh
+ * but don't consider that as loop. It might be a coincidence.
+ */
+ if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
+ BATADV_BLA_LOOPDETECT_TIMEOUT))
+ return true;
+
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid, true);
+ if (unlikely(!backbone_gw))
+ return true;
+
+ queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+ /* backbone_gw is unreferenced in the report work function */
+
+ return true;
+}
+
+/**
* batadv_bla_rx
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
@@ -1480,6 +1616,8 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto allow;
+ if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
+ goto handled;
if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
/* don't allow broadcasts while requests are in flight */
diff --git a/main.h b/main.h
index 4c557eb..d109434 100644
--- a/main.h
+++ b/main.h
@@ -112,6 +112,8 @@
#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
#define BATADV_BLA_WAIT_PERIODS 3
+#define BATADV_BLA_LOOPDETECT_PERIODS 6
+#define BATADV_BLA_LOOPDETECT_TIMEOUT 3000 /* 3 seconds */
#define BATADV_DUPLIST_SIZE 16
#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
@@ -134,10 +136,12 @@ enum batadv_uev_action {
BATADV_UEV_ADD = 0,
BATADV_UEV_DEL,
BATADV_UEV_CHANGE,
+ BATADV_UEV_LOOPDETECT,
};
enum batadv_uev_type {
BATADV_UEV_GW = 0,
+ BATADV_UEV_BLA,
};
#define BATADV_GW_THRESHOLD 50
diff --git a/packet.h b/packet.h
index 34e096d..9df747a 100644
--- a/packet.h
+++ b/packet.h
@@ -169,6 +169,7 @@ enum batadv_bla_claimframe {
BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
BATADV_CLAIM_TYPE_REQUEST = 0x03,
+ BATADV_CLAIM_TYPE_LOOPDETECT = 0x04,
};
/**
diff --git a/sysfs.c b/sysfs.c
index fc47baa..8150f77 100644
--- a/sysfs.c
+++ b/sysfs.c
@@ -94,11 +94,13 @@ batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
static char *batadv_uev_action_str[] = {
"add",
"del",
- "change"
+ "change",
+ "loopdetect",
};
static char *batadv_uev_type_str[] = {
- "gw"
+ "gw",
+ "bla",
};
/* Use this, if you have customized show and store functions for vlan attrs */
diff --git a/types.h b/types.h
index 462a70c..7456928 100644
--- a/types.h
+++ b/types.h
@@ -536,6 +536,9 @@ struct batadv_priv_tt {
* @num_requests; number of bla requests in flight
* @claim_hash: hash table containing mesh nodes this host has claimed
* @backbone_hash: hash table containing all detected backbone gateways
+ * @loopdetect_addr: MAC address used for own loopdetection frames
+ * @loopdetect_lasttime: time when the loopdetection frames were sent
+ * @loopdetect_next: how many periods to wait for the next loopdetect process
* @bcast_duplist: recently received broadcast packets array (for broadcast
* duplicate suppression)
* @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
@@ -548,6 +551,9 @@ struct batadv_priv_bla {
atomic_t num_requests;
struct batadv_hashtable *claim_hash;
struct batadv_hashtable *backbone_hash;
+ uint8_t loopdetect_addr[ETH_ALEN];
+ unsigned long loopdetect_lasttime;
+ atomic_t loopdetect_next;
struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
int bcast_duplist_curr;
/* protects bcast_duplist & bcast_duplist_curr */
@@ -866,6 +872,7 @@ struct batadv_socket_packet {
* backbone gateway - no bcast traffic is formwared until the situation was
* resolved
* @crc: crc16 checksum over all claims
+ * @report_work: work struct for reporting detected loops
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
*/
@@ -879,6 +886,7 @@ struct batadv_bla_backbone_gw {
atomic_t wait_periods;
atomic_t request_sent;
uint16_t crc;
+ struct work_struct report_work;
atomic_t refcount;
struct rcu_head rcu;
};
--
2.1.0.rc1
When comparing Ethernet addresses it is better to use the more
generic batadv_compare_eth(). The latter is also optimised for
architectures with fast unaligned access.
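For reference, a sketch of what batadv_compare_eth() amounts to at this
point (the real helper lives in main.h; the exact definition is assumed
here):

    #include <linux/etherdevice.h>

    /* compare two Ethernet addresses without any alignment requirement */
    static inline int batadv_compare_eth(const void *data1, const void *data2)
    {
            return ether_addr_equal_unaligned(data1, data2);
    }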
Signed-off-by: Antonio Quartulli <antonio@meshcoding.com>
---
network-coding.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/network-coding.c b/network-coding.c
index f1b604d..0049e7a 100644
--- a/network-coding.c
+++ b/network-coding.c
@@ -481,12 +481,10 @@ static int batadv_nc_hash_compare(const struct hlist_node *node,
nc_path2 = data2;
/* Return 1 if the two keys are identical */
- if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop,
- sizeof(nc_path1->prev_hop)) != 0)
+ if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop))
return 0;
- if (memcmp(nc_path1->next_hop, nc_path2->next_hop,
- sizeof(nc_path1->next_hop)) != 0)
+ if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop))
return 0;
return 1;
--
1.8.5.2