From: Sven Eckelmann <sven@narfation.org>
It might be possible that two threads access the same data in the same RCU grace period: the first thread calls call_rcu() to decrement the refcount and free the data while the second thread increases the refcount to use the data. To avoid this race condition, all refcount operations have to be atomic.
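For illustration only (not part of this patch): a minimal sketch of the RCU plus atomic refcount pattern the fix relies on. The struct example_entry and the example_* helpers below are hypothetical names, not batman-adv code.

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_entry {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_entry, rcu));
}

/* drop a reference - the actual kfree() waits for the RCU grace period */
static void example_free_ref(struct example_entry *entry)
{
	if (atomic_dec_and_test(&entry->refcount))
		call_rcu(&entry->rcu, example_free_rcu);
}

/*
 * take a reference to an RCU-protected entry - atomic_inc_not_zero()
 * refuses entries whose last reference was already dropped and which
 * are therefore about to be freed via call_rcu()
 */
static struct example_entry *example_get(struct example_entry __rcu **slot)
{
	struct example_entry *entry;

	rcu_read_lock();
	entry = rcu_dereference(*slot);
	if (entry && !atomic_inc_not_zero(&entry->refcount))
		entry = NULL;
	rcu_read_unlock();

	return entry;
}

The point is that atomic_inc_not_zero() cannot revive an object whose last reference is already gone, so a reader racing with the release inside the same grace period either gets a valid reference or none at all.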
Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
---
 soft-interface.c |   35 +++++++++++++++++------------------
 types.h          |    2 +-
 2 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/batman-adv/soft-interface.c b/batman-adv/soft-interface.c
index bd088f8..bd8b539 100644
--- a/batman-adv/soft-interface.c
+++ b/batman-adv/soft-interface.c
@@ -78,20 +78,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
 	return 0;
 }
 
-static void softif_neigh_free_ref(struct kref *refcount)
-{
-	struct softif_neigh *softif_neigh;
-
-	softif_neigh = container_of(refcount, struct softif_neigh, refcount);
-	kfree(softif_neigh);
-}
-
 static void softif_neigh_free_rcu(struct rcu_head *rcu)
 {
 	struct softif_neigh *softif_neigh;
 
 	softif_neigh = container_of(rcu, struct softif_neigh, rcu);
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	kfree(softif_neigh);
+}
+
+static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
+{
+	if (atomic_dec_and_test(&softif_neigh->refcount))
+		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }
 
 void softif_neigh_purge(struct bat_priv *bat_priv)
@@ -118,11 +116,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
 				 softif_neigh->addr, softif_neigh->vid);
 			softif_neigh_tmp = bat_priv->softif_neigh;
 			bat_priv->softif_neigh = NULL;
-			kref_put(&softif_neigh_tmp->refcount,
-				 softif_neigh_free_ref);
+			softif_neigh_free_ref(softif_neigh_tmp);
 		}
 
-		call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
+		softif_neigh_free_ref(softif_neigh);
 	}
 
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
@@ -143,8 +140,11 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 		if (softif_neigh->vid != vid)
 			continue;
 
+		if (!atomic_inc_not_zero(&softif_neigh->refcount))
+			continue;
+
 		softif_neigh->last_seen = jiffies;
-		goto found;
+		goto out;
 	}
 
 	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
@@ -154,15 +154,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 	memcpy(softif_neigh->addr, addr, ETH_ALEN);
 	softif_neigh->vid = vid;
 	softif_neigh->last_seen = jiffies;
-	kref_init(&softif_neigh->refcount);
+	/* initialize with 2 - caller decrements counter by one */
+	atomic_set(&softif_neigh->refcount, 2);
 
 	INIT_HLIST_NODE(&softif_neigh->list);
 	spin_lock_bh(&bat_priv->softif_neigh_lock);
 	hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
-found:
-	kref_get(&softif_neigh->refcount);
 out:
 	rcu_read_unlock();
 	return softif_neigh;
@@ -266,7 +265,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 			 softif_neigh->addr, softif_neigh->vid);
 		softif_neigh_tmp = bat_priv->softif_neigh;
 		bat_priv->softif_neigh = softif_neigh;
-		kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref);
+		softif_neigh_free_ref(softif_neigh_tmp);
 		/* we need to hold the additional reference */
 		goto err;
 	}
@@ -284,7 +283,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
 	}
 
 out:
-	kref_put(&softif_neigh->refcount, softif_neigh_free_ref);
+	softif_neigh_free_ref(softif_neigh);
 err:
 	kfree_skb(skb);
 	return;
diff --git a/batman-adv/types.h b/batman-adv/types.h
index ca5f20a..8df6d9d 100644
--- a/batman-adv/types.h
+++ b/batman-adv/types.h
@@ -270,7 +270,7 @@ struct softif_neigh {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	short vid;
-	struct kref refcount;
+	atomic_t refcount;
 	struct rcu_head rcu;
 };