The netlink dump functionality transfers a large number of entries from the kernel to userspace. It is rather likely that the transfer has to be interrupted and continued later. During that time, it can happen that entries are added or removed. Userspace could then either receive some entries multiple times or miss entries.
Commit 670dc2833d14 ("netlink: advertise incomplete dumps") introduced a mechanism to inform userspace about this problem. Userspace can then decide whether it is necessary to dump the information again.
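For illustration only (not part of this patch), a minimal userspace sketch of that retry decision: the consumer walks the messages of one receive buffer and checks for NLM_F_DUMP_INTR, which the kernel sets on inconsistent dumps. The function name and buffer handling are made up for the example; on a positive result the caller would simply re-send the dump request.

	/* Sketch: return true when the received dump batch was marked as
	 * interrupted/inconsistent and should therefore be requested again.
	 */
	#include <stdbool.h>
	#include <linux/netlink.h>

	static bool dump_was_interrupted(void *buf, int len)
	{
		struct nlmsghdr *nlh;

		for (nlh = buf; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_flags & NLM_F_DUMP_INTR)
				return true;

			if (nlh->nlmsg_type == NLMSG_DONE)
				break;
		}

		return false;
	}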
The netlink dump functions have to be switched to exclusive locks to avoid changes while the current message is prepared. The generation sequence counter already provided by the hash helper can be used for this simple hash.
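As a condensed sketch of the pattern used in the backbone table dump below (surrounding code and attribute filling omitted): the bucket is walked under its list lock, cb->seq mirrors the hash generation counter, and genl_dump_check_consistent() lets the netlink core flag the dump with NLM_F_DUMP_INTR when the counter changed between messages.

	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;	/* always non-zero */

	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
		hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
				  &batadv_netlink_family, NLM_F_MULTI,
				  BATADV_CMD_GET_BLA_BACKBONE);
		if (!hdr)
			break;

		genl_dump_check_consistent(cb, hdr);
		/* ... dump backbone_gw attributes ... */
		genlmsg_end(msg, hdr);
	}

	spin_unlock_bh(&hash->list_locks[bucket]);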
Reported-by: Matthias Schiffer <mschiffer@universe-factory.net>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
---
 net/batman-adv/bridge_loop_avoidance.c | 41 +++++++++++++++----------
 1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5f1aeede..9f374734 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2325,14 +2325,15 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
  * netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @primary_if: primary interface
  * @backbone_gw: entry to dump
  *
  * Return: 0 or error code.
  */
 static int
-batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
+			       struct netlink_callback *cb,
 			       struct batadv_hard_iface *primary_if,
 			       struct batadv_bla_backbone_gw *backbone_gw)
 {
@@ -2343,13 +2344,16 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 	void *hdr;
 	int ret = -EINVAL;
 
-	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
-			  NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
+	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+			  &batadv_netlink_family, NLM_F_MULTI,
+			  BATADV_CMD_GET_BLA_BACKBONE);
 	if (!hdr) {
 		ret = -ENOBUFS;
 		goto out;
 	}
 
+	genl_dump_check_consistent(cb, hdr);
+
 	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
 
 	spin_lock_bh(&backbone_gw->crc_lock);
@@ -2386,28 +2390,33 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
  * a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
  * @primary_if: primary interface
- * @head: bucket to dump
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
  * @idx_skip: How many entries to skip
 *
 * Return: always 0.
 */
 static int
-batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
+				struct netlink_callback *cb,
 				struct batadv_hard_iface *primary_if,
-				struct hlist_head *head, int *idx_skip)
+				struct batadv_hashtable *hash,
+				unsigned int bucket, int *idx_skip)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	int idx = 0;
 	int ret = 0;
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+	spin_lock_bh(&hash->list_locks[bucket]);
+	cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
 		if (idx++ < *idx_skip)
 			continue;
 
-		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+		ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
 						     primary_if, backbone_gw);
 		if (ret) {
 			*idx_skip = idx - 1;
@@ -2417,7 +2426,7 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 
 	*idx_skip = 0;
 unlock:
-	rcu_read_unlock();
+	spin_unlock_bh(&hash->list_locks[bucket]);
 	return ret;
 }
 
@@ -2437,7 +2446,6 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	struct batadv_hashtable *hash;
 	struct batadv_priv *bat_priv;
 	int bucket = cb->args[0];
-	struct hlist_head *head;
 	int idx = cb->args[1];
 	int ifindex;
 	int ret = 0;
@@ -2463,11 +2471,8 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	}
 
 	while (bucket < hash->size) {
-		head = &hash->table[bucket];
-
-		if (batadv_bla_backbone_dump_bucket(msg, portid,
-						    cb->nlh->nlmsg_seq,
-						    primary_if, head, &idx))
+		if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
+						    hash, bucket, &idx))
 			break;
 		bucket++;
 	}