Instead of transmitting the individual OGMv2 packets from the aggregation queue, merge them into a single OGMv2 packet and transmit this aggregate.
This reduces overhead, as it saves one Ethernet header and one transmission per aggregated OGMv2 packet.
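For illustration only (not part of the change itself), a minimal user-space sketch of the idea, assuming a fixed OGM size and the usual 14 byte Ethernet header; all names and sizes in it are made up for the example:

  #include <stdio.h>
  #include <string.h>

  #define OGM_COUNT 3      /* number of queued OGMv2 packets (assumed) */
  #define OGM_LEN   100    /* size of one OGMv2 packet in bytes (assumed) */
  #define ETH_HLEN  14     /* Ethernet header size */

  int main(void)
  {
          unsigned char queue[OGM_COUNT][OGM_LEN] = { { 0 } };
          unsigned char aggr[OGM_COUNT * OGM_LEN];
          unsigned int aggr_len = 0;
          unsigned int i;

          /* merge all queued OGMs back to back into one payload */
          for (i = 0; i < OGM_COUNT; i++) {
                  memcpy(aggr + aggr_len, queue[i], OGM_LEN);
                  aggr_len += OGM_LEN;
          }

          /* one frame instead of OGM_COUNT frames */
          printf("unaggregated: %d frames, %d bytes of Ethernet headers\n",
                 OGM_COUNT, OGM_COUNT * ETH_HLEN);
          printf("aggregated:   1 frame, %d bytes of Ethernet headers, %u bytes of payload\n",
                 ETH_HLEN, aggr_len);
          return 0;
  }

With three queued OGMs this comes down to one frame with one Ethernet header instead of three frames with three headers, i.e. 28 header bytes and two transmissions saved.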
Signed-off-by: Linus Lüssing <linus.luessing@c0d3.blue>
---
 net/batman-adv/bat_v_ogm.c | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)

diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 36f78889..a743c67d 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -180,15 +180,40 @@ static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
 /**
  * batadv_v_ogm_aggr_send() - flush & send aggregation queue
  * @hard_iface: the interface with the aggregation queue to flush
+ *
+ * Aggregates all OGMv2 packets currently in the aggregation queue into a
+ * single OGMv2 packet and transmits this aggregate.
+ *
+ * The aggregation queue is empty after this call.
  */
 static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
 {
-	struct sk_buff *skb;
+	unsigned int aggr_len = hard_iface->bat_v.aggr_len;
+	struct sk_buff *skb, *skb_aggr;
+	unsigned int ogm_len;
+
+	if (!aggr_len)
+		return;
+
+	skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN);
+	if (!skb_aggr) {
+		batadv_v_ogm_aggr_list_free(hard_iface);
+		return;
+	}
+
+	skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
+	skb_reset_network_header(skb_aggr);
 
 	while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
 		hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
-		batadv_v_ogm_send_to_if(skb, hard_iface);
+
+		ogm_len = batadv_v_ogm_len(skb);
+		skb_put_data(skb_aggr, skb->data, ogm_len);
+
+		consume_skb(skb);
 	}
+
+	batadv_v_ogm_send_to_if(skb_aggr, hard_iface);
 }
 
 /**