So far, when purging the broadcast and OGM queues we temporarily give up the spin lock of these queues to be able to cancel any scheduled forwarding work. However, this is unsafe and can lead to a general protection fault in batadv_purge_outstanding_packets().
With this patch we split the queue purging into two steps: first, we remove the forward packets from those queues and signal the cancellation. Second, we actively cancel any scheduled forwarding work, wait for any running forwarding to finish and only free a forw_packet afterwards.
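For orientation, the two steps condense to roughly the following sketch (purge_sketch is a made-up name; the per-interface filter and the OGM queue are omitted, and the modern four-argument hlist_for_each_entry_safe() is used here, while the patch below still targets the older five-argument form):

static void purge_sketch(struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *tmp;
	struct hlist_head canceled;

	INIT_HLIST_HEAD(&canceled);

	/* step 1: under the queue lock, unhash every packet so that an
	 * already running worker can detect the cancellation, and park it
	 * on a private list
	 */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp,
				  &bat_priv->forw_bcast_list, list) {
		hlist_del_init(&forw_packet->list);
		hlist_add_head(&forw_packet->canceled_list, &canceled);
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* step 2: with no spinlock held it is safe to sleep in
	 * cancel_delayed_work_sync() before freeing each packet
	 */
	hlist_for_each_entry_safe(forw_packet, tmp, &canceled, canceled_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		hlist_del(&forw_packet->canceled_list);
		batadv_forw_packet_free(forw_packet);
	}
}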
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
Fixes issue #168

 send.c  | 117 ++++++++++++++++++++++++++++++++++++++-------------------------
 types.h |   1 +
 2 files changed, 71 insertions(+), 47 deletions(-)
diff --git a/send.c b/send.c
index 0a0bb45..f93476b 100644
--- a/send.c
+++ b/send.c
@@ -245,6 +245,10 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 	bat_priv = netdev_priv(soft_iface);
 
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+	if (hlist_unhashed(&forw_packet->list)) {
+		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+		return;
+	}
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -293,6 +297,10 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 			   delayed_work);
 	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
+	if (hlist_unhashed(&forw_packet->list)) {
+		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+		return;
+	}
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
@@ -316,13 +324,68 @@ out:
 	batadv_forw_packet_free(forw_packet);
 }
 
+/**
+ * batadv_cancel_packets - Cancels a list of forward packets
+ * @forw_list: The to be canceled forward packets
+ * @canceled_list: The backup list.
+ *
+ * This cancels any scheduled forwarding packet tasks in the provided
+ * forw_list. The packets are being moved from the forw_list to the
+ * canceled_list afterwards to unhash the forward packet list pointer,
+ * allowing any already running task to notice the cancellation.
+ */
+static void batadv_cancel_packets(struct hlist_head *forw_list,
+				  struct hlist_head *canceled_list,
+				  const struct batadv_hard_iface *hard_iface)
+{
+	struct batadv_forw_packet *forw_packet;
+	struct hlist_node *tmp_node, *safe_tmp_node;
+
+	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+				  forw_list, list) {
+		/* if purge_outstanding_packets() was called with an argument
+		 * we delete only packets belonging to the given interface
+		 */
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
+			continue;
+
+		hlist_del_init(&forw_packet->list);
+		hlist_add_head(&forw_packet->canceled_list, canceled_list);
+	}
+}
+
+/**
+ * batadv_canceled_packets_free - Frees canceled forward packets
+ * @head: A list of to be freed forw_packets
+ *
+ * This function cancels the scheduling of any packet in the provided list,
+ * waits for any possibly running packet forwarding thread to finish and
+ * finally, safely frees this forward packet.
+ *
+ * This function might sleep.
+ */
+static void batadv_canceled_packets_free(struct hlist_head *head)
+{
+	struct batadv_forw_packet *forw_packet;
+	struct hlist_node *tmp_node, *safe_tmp_node;
+
+	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, head,
+				  canceled_list) {
+		cancel_delayed_work_sync(&forw_packet->delayed_work);
+
+		hlist_del(&forw_packet->canceled_list);
+		batadv_forw_packet_free(forw_packet);
+	}
+}
+
 void
 batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 				 const struct batadv_hard_iface *hard_iface)
 {
-	struct batadv_forw_packet *forw_packet;
-	struct hlist_node *tmp_node, *safe_tmp_node;
-	bool pending;
+	struct hlist_head head;
+
+	INIT_HLIST_HEAD(&head);
 
 	if (hard_iface)
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -334,53 +397,13 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 	/* free bcast list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
-				  &bat_priv->forw_bcast_list, list) {
-		/* if purge_outstanding_packets() was called with an argument
-		 * we delete only packets belonging to the given interface
-		 */
-		if ((hard_iface) &&
-		    (forw_packet->if_incoming != hard_iface))
-			continue;
-
-		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
-
-		/* batadv_send_outstanding_bcast_packet() will lock the list to
-		 * delete the item from the list
-		 */
-		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-
-		if (pending) {
-			hlist_del(&forw_packet->list);
-			batadv_forw_packet_free(forw_packet);
-		}
-	}
+	batadv_cancel_packets(&bat_priv->forw_bcast_list, &head, hard_iface);
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
 	/* free batman packet list */
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
-				  &bat_priv->forw_bat_list, list) {
-		/* if purge_outstanding_packets() was called with an argument
-		 * we delete only packets belonging to the given interface
-		 */
-		if ((hard_iface) &&
-		    (forw_packet->if_incoming != hard_iface))
-			continue;
-
-		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
-		/* send_outstanding_bat_packet() will lock the list to
-		 * delete the item from the list
-		 */
-		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock_bh(&bat_priv->forw_bat_list_lock);
-
-		if (pending) {
-			hlist_del(&forw_packet->list);
-			batadv_forw_packet_free(forw_packet);
-		}
-	}
+	batadv_cancel_packets(&bat_priv->forw_bat_list, &head, hard_iface);
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+	batadv_canceled_packets_free(&head);
 }
diff --git a/types.h b/types.h
index aba8364..f62a35f 100644
--- a/types.h
+++ b/types.h
@@ -853,6 +853,7 @@ struct batadv_skb_cb {
  */
 struct batadv_forw_packet {
 	struct hlist_node list;
+	struct hlist_node canceled_list;
 	unsigned long send_time;
 	uint8_t own;
 	struct sk_buff *skb;
We need to perform the addition of to-be-forwarded packets to our OGM and broadcast queues and the starting of the forward packet timer in one atomic step. Otherwise we might get a segmentation fault when trying to start the timer of a forw_packet: the queue purging routines might have freed the forw_packet already in the short window between the queue list addition and the queue_delayed_work() call.
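Spelled out as a hypothetical interleaving (fp abbreviates the forw_packet; the purge side assumes the two-step cancel scheme from the previous patch):

/*
 * scheduling path                       purge path
 * ---------------                       ----------
 * spin_lock_bh(&list_lock);
 * hlist_add_head(&fp->list, ...);
 * spin_unlock_bh(&list_lock);
 *                                       batadv_cancel_packets() moves fp
 *                                       off the queue;
 *                                       batadv_canceled_packets_free():
 *                                       cancel_delayed_work_sync() is a
 *                                       no-op (nothing queued yet) and
 *                                       fp is freed
 * queue_delayed_work(batadv_event_workqueue,
 *                    &fp->delayed_work, ...);    <-- use after free
 */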
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
 bat_iv_ogm.c | 12 ++++++------
 send.c       |  8 ++------
 2 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/bat_iv_ogm.c b/bat_iv_ogm.c
index 7654b76..ee0b11f 100644
--- a/bat_iv_ogm.c
+++ b/bat_iv_ogm.c
@@ -440,17 +440,17 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	if (direct_link)
 		forw_packet_aggr->direct_link_flags |= 1;
 
-	/* add new packet to packet list */
-	spin_lock_bh(&bat_priv->forw_bat_list_lock);
-	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
-	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
-	/* start timer for this packet */
+	/* initialize job for this packet */
 	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
 			  batadv_send_outstanding_bat_ogm_packet);
+
+	/* add new packet to packet list and start its timer */
+	spin_lock_bh(&bat_priv->forw_bat_list_lock);
+	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
 	queue_delayed_work(batadv_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
+	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
 	return;
 out:
diff --git a/send.c b/send.c
index f93476b..4bd0c00 100644
--- a/send.c
+++ b/send.c
@@ -152,16 +152,12 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				 struct batadv_forw_packet *forw_packet,
 				 unsigned long send_time)
 {
-	INIT_HLIST_NODE(&forw_packet->list);
-
-	/* add new packet to packet list */
+	/* add new packet to packet list and start its timer */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
-	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
-
-	/* start timer for this packet */
 	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
 			   send_time);
+	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 }
 
 /* add a broadcast packet to the queue and setup timers. broadcast packets
On Wednesday, February 27, 2013 17:58:15 Linus Lüssing wrote:
+/**
+ * batadv_canceled_packets_free - Frees canceled forward packets
+ * @head: A list of to be freed forw_packets
+ *
+ * This function cancels the scheduling of any packet in the provided list,
+ * waits for any possibly running packet forwarding thread to finish and
+ * finally, safely frees this forward packet.
+ *
+ * This function might sleep.
+ */
+static void batadv_canceled_packets_free(struct hlist_head *head)
+{
+	struct batadv_forw_packet *forw_packet;
+	struct hlist_node *tmp_node, *safe_tmp_node;
+
+	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, head,
+				  canceled_list) {
+		cancel_delayed_work_sync(&forw_packet->delayed_work);
+
+		hlist_del(&forw_packet->canceled_list);
+		batadv_forw_packet_free(forw_packet);
+	}
+}
I don't quite see how your patch can work when only one interface is deactivated and not the mesh as a whole. batadv_purge_outstanding_packets() is also called from batadv_hardif_disable_interface(), in which case the broadcast packets re-arm themselves by appending their work item to their respective queues. That is why we have the magic "pending" check in the cleanup functions.
Cheers, Marek
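For context, the re-arm path Marek refers to looks roughly like this (condensed from batadv_send_outstanding_bcast_packet() in send.c, which also appears in the diffs later in this thread):

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		/* re-arm: the work item puts itself back on the broadcast
		 * queue and schedules its delayed work again
		 */
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}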
On Sunday, March 03, 2013 13:06:41 Marek Lindner wrote:
On Wednesday, February 27, 2013 17:58:15 Linus Lüssing wrote:
+/**
+ * batadv_canceled_packets_free - Frees canceled forward packets
+ * @head: A list of to be freed forw_packets
+ *
+ * This function cancels the scheduling of any packet in the provided list,
+ * waits for any possibly running packet forwarding thread to finish and
+ * finally, safely frees this forward packet.
+ *
+ * This function might sleep.
+ */
+static void batadv_canceled_packets_free(struct hlist_head *head)
+{
+	struct batadv_forw_packet *forw_packet;
+	struct hlist_node *tmp_node, *safe_tmp_node;
+
+	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, head,
+				  canceled_list) {
+		cancel_delayed_work_sync(&forw_packet->delayed_work);
+
+		hlist_del(&forw_packet->canceled_list);
+		batadv_forw_packet_free(forw_packet);
+	}
+}
I don't quite see how your patch can work when only one interface is deactivated and not the mesh as a whole. batadv_purge_outstanding_packets() is also called from batadv_hardif_disable_interface(), in which case the broadcast packets re-arm themselves by appending their work item to their respective queues. That is why we have the magic "pending" check in the cleanup functions.
One idea could be to check the incoming interface status in batadv_send_outstanding_bcast_packet(), right below the mesh status check. If the interface is going down we could exit immediately. This could be an even better solution than the pending check we have today.
Cheers, Marek
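A sketch of that idea (hypothetical, not part of any patch in this thread; the exact status test is an assumption, batman-adv tracks the interface state in hard_iface->if_status):

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	/* ... */
	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	/* hypothetical check: the incoming interface is going down,
	 * so clean up instead of re-arming
	 */
	if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
		goto out;
	/* ... */
}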
When removing a single interface while there are still broadcast packets in the queue which re-arm themselves, those queue slots are lost.
This patch fixes this issue by making each forw_packet remember which queue counter it claimed a slot from (the new queue_left pointer below), so that freeing a forw_packet always returns its slot.
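Condensed from the diff below, the heart of the accounting change: the counter pointer travels with the packet, and the single free function returns the slot and the interface reference on every exit path (send finished, purge, or error):

void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);	/* return the slot */
	kfree(forw_packet);
}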
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
 bat_iv_ogm.c | 32 ++++---------
 send.c       | 97 ++++++++++++++++++++++++++++++++++++++++++++--------------
 send.h       |  5 +++
 types.h      |  1 +
 4 files changed, 86 insertions(+), 49 deletions(-)
diff --git a/bat_iv_ogm.c b/bat_iv_ogm.c
index 071f288..4e33e0e 100644
--- a/bat_iv_ogm.c
+++ b/bat_iv_ogm.c
@@ -387,26 +387,13 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	struct batadv_forw_packet *forw_packet_aggr;
 	unsigned char *skb_buff;
 	unsigned int skb_size;
+	atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
 
-	if (!atomic_inc_not_zero(&if_incoming->refcount))
+	forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, queue_left,
+						    bat_priv);
+	if (!forw_packet_aggr)
 		return;
 
-	/* own packet should always be scheduled */
-	if (!own_packet) {
-		if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
-			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-				   "batman packet queue full\n");
-			goto out;
-		}
-	}
-
-	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
-	if (!forw_packet_aggr) {
-		if (!own_packet)
-			atomic_inc(&bat_priv->batman_queue_left);
-		goto out;
-	}
-
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
 	    (packet_len < BATADV_MAX_AGGREGATION_BYTES))
 		skb_size = BATADV_MAX_AGGREGATION_BYTES;
@@ -417,10 +404,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	forw_packet_aggr->skb = dev_alloc_skb(skb_size);
 	if (!forw_packet_aggr->skb) {
-		if (!own_packet)
-			atomic_inc(&bat_priv->batman_queue_left);
-		kfree(forw_packet_aggr);
-		goto out;
+		batadv_forw_packet_free(forw_packet_aggr);
+		return;
 	}
 	skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN);
 
@@ -431,7 +416,6 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	memcpy(skb_buff, packet_buff, packet_len);
 
 	forw_packet_aggr->own = own_packet;
-	forw_packet_aggr->if_incoming = if_incoming;
 	forw_packet_aggr->num_packets = 0;
 	forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
 	forw_packet_aggr->send_time = send_time;
@@ -451,10 +435,6 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	queue_delayed_work(batadv_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
-
-	return;
-out:
-	batadv_hardif_free_ref(if_incoming);
 }
 
 /* aggregate a new packet into the existing ogm packet */
diff --git a/send.c b/send.c
index eb16b04..08ac5e8 100644
--- a/send.c
+++ b/send.c
@@ -138,12 +138,77 @@ void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
 	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
 }
 
-static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
+/**
+ * batadv_forw_packet_alloc - Allocates a forwarding packet
+ * @if_incoming: The (optional) if_incoming to be grabbed
+ * @queue_left: The (optional) queue counter to decrease
+ * @bat_priv: The bat_priv for the mesh of this forw_packet
+ *
+ * Allocates a forwarding packet and tries to get a reference to the
+ * (optional) if_incoming and queue_left. If queue_left is NULL then
+ * bat_priv is optional, too.
+ *
+ * On success, returns the allocated forwarding packet. Otherwise returns
+ * NULL.
+ */
+struct batadv_forw_packet *batadv_forw_packet_alloc(
+		struct batadv_hard_iface *if_incoming,
+		atomic_t *queue_left,
+		struct batadv_priv *bat_priv)
+{
+	struct batadv_forw_packet *forw_packet = NULL;
+
+	if (if_incoming && !atomic_inc_not_zero(&if_incoming->refcount))
+		goto out;
+
+	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
+		if (queue_left == &bat_priv->bcast_queue_left)
+			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+				   "bcast queue full\n");
+		else if (queue_left == &bat_priv->batman_queue_left)
+			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+				   "batman queue full\n");
+		else
+			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+				   "a mysterious queue is full\n");
+		goto err;
+	}
+
+	forw_packet = kmalloc(sizeof(struct batadv_forw_packet), GFP_ATOMIC);
+	if (!forw_packet)
+		goto err2;
+
+	forw_packet->skb = NULL;
+	forw_packet->if_incoming = if_incoming;
+	forw_packet->queue_left = queue_left;
+
+	goto out;
+
+err2:
+	if (queue_left)
+		atomic_inc(queue_left);
+err:
+	if (if_incoming)
+		batadv_hardif_free_ref(if_incoming);
+out:
+	return forw_packet;
+}
+
+/**
+ * batadv_forw_packet_free - Frees a forwarding packet
+ * @forw_packet: The packet to free
+ *
+ * This frees a forwarding packet and releases any resources it might
+ * have claimed.
+ */
+void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
 {
 	if (forw_packet->skb)
 		kfree_skb(forw_packet->skb);
 	if (forw_packet->if_incoming)
 		batadv_hardif_free_ref(forw_packet->if_incoming);
+	if (forw_packet->queue_left)
+		atomic_inc(forw_packet->queue_left);
 	kfree(forw_packet);
 }
 
@@ -177,25 +242,21 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				    const struct sk_buff *skb,
 				    unsigned long delay)
 {
-	struct batadv_hard_iface *primary_if = NULL;
+	struct batadv_hard_iface *primary_if;
 	struct batadv_forw_packet *forw_packet;
 	struct batadv_bcast_packet *bcast_packet;
 	struct sk_buff *newskb;
 
-	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
-		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "bcast packet queue full\n");
-		goto out;
-	}
-
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
-		goto out_and_inc;
-
-	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
+		goto out;
 
+	forw_packet = batadv_forw_packet_alloc(primary_if,
+					       &bat_priv->bcast_queue_left,
+					       bat_priv);
+	batadv_hardif_free_ref(primary_if);
 	if (!forw_packet)
-		goto out_and_inc;
+		goto out;
 
 	newskb = skb_copy(skb, GFP_ATOMIC);
 	if (!newskb)
@@ -208,7 +269,6 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 	skb_reset_mac_header(newskb);
 
 	forw_packet->skb = newskb;
-	forw_packet->if_incoming = primary_if;
 
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
@@ -220,12 +280,8 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 	return NETDEV_TX_OK;
 
 packet_free:
-	kfree(forw_packet);
-out_and_inc:
-	atomic_inc(&bat_priv->bcast_queue_left);
+	batadv_forw_packet_free(forw_packet);
 out:
-	if (primary_if)
-		batadv_hardif_free_ref(primary_if);
 	return NETDEV_TX_BUSY;
 }
 
@@ -282,7 +338,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 
 out:
 	batadv_forw_packet_free(forw_packet);
-	atomic_inc(&bat_priv->bcast_queue_left);
 }
 
 void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
@@ -312,10 +367,6 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 	batadv_schedule_bat_ogm(forw_packet->if_incoming);
 
 out:
-	/* don't count own packet */
-	if (!forw_packet->own)
-		atomic_inc(&bat_priv->batman_queue_left);
-
 	batadv_forw_packet_free(forw_packet);
 }
 
diff --git a/send.h b/send.h
index 38e662f..7004486 100644
--- a/send.h
+++ b/send.h
@@ -27,6 +27,11 @@ bool batadv_send_skb_to_orig(struct sk_buff *skb,
 			     struct batadv_orig_node *orig_node,
 			     struct batadv_hard_iface *recv_if);
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
+struct batadv_forw_packet *batadv_forw_packet_alloc(
+		struct batadv_hard_iface *if_incoming,
+		atomic_t *queue_left,
+		struct batadv_priv *bat_priv);
+void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet);
 int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				    const struct sk_buff *skb,
 				    unsigned long delay);
diff --git a/types.h b/types.h
index 5f542bd..8c28142 100644
--- a/types.h
+++ b/types.h
@@ -863,6 +863,7 @@ struct batadv_forw_packet {
 	uint8_t num_packets;
 	struct delayed_work delayed_work;
 	struct batadv_hard_iface *if_incoming;
+	atomic_t *queue_left;
 };
 
 /**
There's no need for an explicit hlist_node initialization if the node is added to a list right away, as is the case with the hlist_add_head() calls here.
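For reference, hlist_add_head() overwrites both fields of the node unconditionally (paraphrasing the classic <linux/list.h> implementation), so the prior INIT_HLIST_NODE() had no effect:

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;		/* old contents of n are irrelevant */
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;		/* ... both fields get rewritten */
}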
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
 bat_iv_ogm.c | 2 --
 send.c       | 2 --
 2 files changed, 4 deletions(-)
diff --git a/bat_iv_ogm.c b/bat_iv_ogm.c
index 4e33e0e..77568ba 100644
--- a/bat_iv_ogm.c
+++ b/bat_iv_ogm.c
@@ -409,8 +409,6 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	}
 	skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN);
 
-	INIT_HLIST_NODE(&forw_packet_aggr->list);
-
 	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
 	forw_packet_aggr->packet_len = packet_len;
 	memcpy(skb_buff, packet_buff, packet_len);
diff --git a/send.c b/send.c
index 08ac5e8..c151982 100644
--- a/send.c
+++ b/send.c
@@ -217,8 +217,6 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				 struct batadv_forw_packet *forw_packet,
 				 unsigned long send_time)
 {
-	INIT_HLIST_NODE(&forw_packet->list);
-
 	/* add new packet to packet list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
Perform queue list addition and work queueing atomically.
Otherwise we might miss waiting for a re-armed task upon interface removal and free packets while the re-armed tasks still try to process them afterwards, causing segmentation faults.
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
 bat_iv_ogm.c | 12 ++++++------
 send.c       |  6 ++----
 2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/bat_iv_ogm.c b/bat_iv_ogm.c
index 77568ba..de3a4c1 100644
--- a/bat_iv_ogm.c
+++ b/bat_iv_ogm.c
@@ -422,17 +422,17 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 	if (direct_link)
 		forw_packet_aggr->direct_link_flags |= 1;
 
-	/* add new packet to packet list */
-	spin_lock_bh(&bat_priv->forw_bat_list_lock);
-	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
-	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
-	/* start timer for this packet */
+	/* initialize job for this packet */
 	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
 			  batadv_send_outstanding_bat_ogm_packet);
+
+	/* add new packet to packet list and start its timer */
+	spin_lock_bh(&bat_priv->forw_bat_list_lock);
+	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
 	queue_delayed_work(batadv_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
+	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 }
 
 /* aggregate a new packet into the existing ogm packet */
diff --git a/send.c b/send.c
index c151982..1ddfae7 100644
--- a/send.c
+++ b/send.c
@@ -217,14 +217,12 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				 struct batadv_forw_packet *forw_packet,
 				 unsigned long send_time)
 {
-	/* add new packet to packet list */
+	/* add new packet to packet list and start its timer */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
-	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
-
-	/* start timer for this packet */
 	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
 			   send_time);
+	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 }
 
 /* add a broadcast packet to the queue and setup timers. broadcast packets
On Monday 18 March 2013 06:09:55 Linus Lüssing wrote:
Perform queue list addition and work queueing atomically.
Otherwise we might miss waiting for a re-armed task upon interface removal and free packets while the re-armed tasks still try to process them afterwards, causing segmentation faults.
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
It looks like this patch and the other two don't apply anymore. Can you please resend them or mark them correctly in patchwork [1, 2, 3].
Thanks, Sven
[1] https://patchwork.open-mesh.org/patch/2798/ [2] https://patchwork.open-mesh.org/patch/2812/ [3] https://patchwork.open-mesh.org/patch/2786/
On Thursday 10 March 2016 18:00:58 Sven Eckelmann wrote: [...]
It looks like this patch and the other two don't apply anymore. Can you please resend them or mark them correctly in patchwork [1, 2, 3].
Thanks, Sven
[1] https://patchwork.open-mesh.org/patch/2798/ [2] https://patchwork.open-mesh.org/patch/2812/ [3] https://patchwork.open-mesh.org/patch/2786/
What is the state here?
Kind regards, Sven
On Samstag, 9. April 2016 18:23:56 CEST Sven Eckelmann wrote:
On Thursday 10 March 2016 18:00:58 Sven Eckelmann wrote: [...]
It looks like this patch and the other two don't apply anymore. Can you please resend them or mark them correctly in patchwork [1, 2, 3].
Thanks,
Sven
[1] https://patchwork.open-mesh.org/patch/2798/ [2] https://patchwork.open-mesh.org/patch/2812/ [3] https://patchwork.open-mesh.org/patch/2786/
What is the state here?
What is the state of these three patches?
Kind regards, Sven
On Fri, Jul 22, 2016 at 12:27:04AM +0200, Sven Eckelmann wrote:
On Samstag, 9. April 2016 18:23:56 CEST Sven Eckelmann wrote:
On Thursday 10 March 2016 18:00:58 Sven Eckelmann wrote: [...]
It looks like this patch and the other two don't apply anymore. Can you please resend them or mark them correctly in patchwork [1, 2, 3].
Thanks,
Sven
[1] https://patchwork.open-mesh.org/patch/2798/ [2] https://patchwork.open-mesh.org/patch/2812/ [3] https://patchwork.open-mesh.org/patch/2786/
What is the state here?
What is the state of these three patches?
Kind regards, Sven
Hi Sven,
Looks like I'm still having this issue from time to time, even on the master branch. Will try to refresh and rephrase them later.
Regards, Linus
Avoid rearmed bcast packet being missed by cancel_delayed_work_sync().
When a scheduled broadcast sending work executes and removes itself from the broadcast queue, it becomes invisible to the purging methods until it re-arms itself.
This means a broadcast work that is about to re-arm might slip past a cancel_delayed_work_sync() issued between its removal from and its re-addition to the broadcast queue.
To avoid this we perform the removal and re-addition in one atomic step.
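The window being closed, as a hypothetical interleaving (fp abbreviates the forw_packet):

/*
 * bcast worker                          purge (on interface removal)
 * ------------                          ----------------------------
 * lock; hlist_del(&fp->list); unlock;
 *                                       lock; fp is not on the queue,
 *                                       so nothing is canceled; unlock;
 * lock; hlist_add_head(&fp->list, ...);
 * queue_delayed_work(...); unlock;
 *                                       interface state is torn down while
 *                                       the re-armed work is still pending
 */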
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
 send.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/send.c b/send.c
index 1ddfae7..34c54e9 100644
--- a/send.c
+++ b/send.c
@@ -217,12 +217,9 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 				 struct batadv_forw_packet *forw_packet,
 				 unsigned long send_time)
 {
-	/* add new packet to packet list and start its timer */
-	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
 	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
 			   send_time);
-	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 }
 
 /* add a broadcast packet to the queue and setup timers. broadcast packets
@@ -272,7 +269,10 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 	INIT_DELAYED_WORK(&forw_packet->delayed_work,
 			  batadv_send_outstanding_bcast_packet);
 
+	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
 	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
+	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+
 	return NETDEV_TX_OK;
 
 packet_free:
@@ -296,10 +296,6 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 	soft_iface = forw_packet->if_incoming->soft_iface;
 	bat_priv = netdev_priv(soft_iface);
 
-	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-	hlist_del(&forw_packet->list);
-	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
-
 	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
 		goto out;
 
@@ -327,12 +323,21 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 	/* if we still have some more bcasts to send */
 	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
+		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+		hlist_del(&forw_packet->list);
+
 		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
 						 msecs_to_jiffies(5));
+		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+
 		return;
 	}
 
 out:
+	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+	hlist_del(&forw_packet->list);
+	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+
 	batadv_forw_packet_free(forw_packet);
 }
Do not temporarily give up the spin_lock on bcast/ogm queue purging.
So far, when purging the broadcast and OGM queues we temporarily give up the spin lock of these queues to be able to cancel any scheduled forwarding work. However, this is unsafe and can lead to a general protection fault in batadv_purge_outstanding_packets().
With this patch we split the queue purging into two steps: first, we remove the forward packets from those queues and signal the cancellation. Second, we actively cancel any scheduled forwarding work, wait for any running forwarding to finish and only free a forw_packet afterwards.
Signed-off-by: Linus Lüssing <linus.luessing@web.de>
---
 send.c  | 140 ++++++++++++++++++++++++++++++++++++++++++---------------------
 types.h |   1 +
 2 files changed, 94 insertions(+), 47 deletions(-)
diff --git a/send.c b/send.c
index 34c54e9..3bb0b1a 100644
--- a/send.c
+++ b/send.c
@@ -324,6 +324,10 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 	/* if we still have some more bcasts to send */
 	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
 		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+		if (hlist_unhashed(&forw_packet->list)) {
+			spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+			return;
+		}
 		hlist_del(&forw_packet->list);
 
 		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
@@ -335,6 +339,10 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 
 out:
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+	if (hlist_unhashed(&forw_packet->list)) {
+		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+		return;
+	}
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -352,6 +360,10 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 			   delayed_work);
 	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
+	if (hlist_unhashed(&forw_packet->list)) {
+		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+		return;
+	}
 	hlist_del(&forw_packet->list);
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 
@@ -371,13 +383,87 @@ out:
 	batadv_forw_packet_free(forw_packet);
 }
 
+/**
+ * batadv_cancel_packets - Cancels a list of forward packets
+ * @forw_list: The to be canceled forward packets
+ * @canceled_list: The backup list
+ * @hard_iface: The interface to cancel forward packets for
+ *
+ * This cancels any scheduled forwarding packet tasks in the provided
+ * forw_list for the given hard_iface. If hard_iface is NULL forwarding
+ * packets on all hard interfaces will be canceled.
+ *
+ * The packets are being moved from the forw_list to the canceled_list
+ * and the forward packet list pointer will be unhashed, allowing any already
+ * running task to notice the cancellation.
+ */
+static void batadv_cancel_packets(struct hlist_head *forw_list,
+				  struct hlist_head *canceled_list,
+				  const struct batadv_hard_iface *hard_iface)
+{
+	struct batadv_forw_packet *forw_packet;
+	struct hlist_node *safe_tmp_node;
+
+	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
+				  forw_list, list) {
+		/* if purge_outstanding_packets() was called with an argument
+		 * we delete only packets belonging to the given interface
+		 */
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
+			continue;
+
+		hlist_del_init(&forw_packet->list);
+		hlist_add_head(&forw_packet->canceled_list, canceled_list);
+	}
+}
+
+/**
+ * batadv_canceled_packets_free - Frees canceled forward packets
+ * @head: A list of to be freed forw_packets
+ *
+ * This function cancels the scheduling of any packet in the provided list,
+ * waits for any possibly running packet forwarding thread to finish and
+ * finally, safely frees this forward packet.
+ *
+ * This function might sleep.
+ */
+static void batadv_canceled_packets_free(struct hlist_head *head)
+{
+	struct batadv_forw_packet *forw_packet;
+	struct hlist_node *safe_tmp_node;
+
+	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
+				  canceled_list) {
+		cancel_delayed_work_sync(&forw_packet->delayed_work);
+
+		hlist_del(&forw_packet->canceled_list);
+		batadv_forw_packet_free(forw_packet);
+	}
+}
+
+/**
+ * batadv_purge_outstanding_packets - Stops/purges scheduled bcast/ogm packets
+ * @bat_priv: The mesh to cancel and purge bcast/ogm packets for
+ * @hard_iface: The hard interface to cancel and purge bcast/ogm packets on
+ *
+ * This method cancels and purges any broadcast and ogm packet on the given
+ * hard_iface. If hard_iface is NULL, broadcast and ogm packets on all hard
+ * interfaces will be canceled and purged.
+ *
+ * Note that after this method bcast/ogm callbacks might still be running for
+ * a few instructions (use a flush_workqueue(batadv_event_workqueue) to
+ * wait for them to finish).
+ *
+ * This function might sleep.
+ */
 void
 batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 				 const struct batadv_hard_iface *hard_iface)
 {
-	struct batadv_forw_packet *forw_packet;
-	struct hlist_node *safe_tmp_node;
-	bool pending;
+	struct hlist_head head;
+
+	INIT_HLIST_HEAD(&head);
 
 	if (hard_iface)
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -389,53 +475,13 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 	/* free bcast list */
 	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
-				  &bat_priv->forw_bcast_list, list) {
-		/* if purge_outstanding_packets() was called with an argument
-		 * we delete only packets belonging to the given interface
-		 */
-		if ((hard_iface) &&
-		    (forw_packet->if_incoming != hard_iface))
-			continue;
-
-		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
-
-		/* batadv_send_outstanding_bcast_packet() will lock the list to
-		 * delete the item from the list
-		 */
-		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-
-		if (pending) {
-			hlist_del(&forw_packet->list);
-			batadv_forw_packet_free(forw_packet);
-		}
-	}
+	batadv_cancel_packets(&bat_priv->forw_bcast_list, &head, hard_iface);
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
 	/* free batman packet list */
 	spin_lock_bh(&bat_priv->forw_bat_list_lock);
-	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
-				  &bat_priv->forw_bat_list, list) {
-		/* if purge_outstanding_packets() was called with an argument
-		 * we delete only packets belonging to the given interface
-		 */
-		if ((hard_iface) &&
-		    (forw_packet->if_incoming != hard_iface))
-			continue;
-
-		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
-		/* send_outstanding_bat_packet() will lock the list to
-		 * delete the item from the list
-		 */
-		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock_bh(&bat_priv->forw_bat_list_lock);
-
-		if (pending) {
-			hlist_del(&forw_packet->list);
-			batadv_forw_packet_free(forw_packet);
-		}
-	}
+	batadv_cancel_packets(&bat_priv->forw_bat_list, &head, hard_iface);
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+	batadv_canceled_packets_free(&head);
 }
diff --git a/types.h b/types.h
index 8c28142..d0d3c07 100644
--- a/types.h
+++ b/types.h
@@ -855,6 +855,7 @@ struct batadv_skb_cb {
  */
 struct batadv_forw_packet {
 	struct hlist_node list;
+	struct hlist_node canceled_list;
 	unsigned long send_time;
 	uint8_t own;
 	struct sk_buff *skb;