[linux-next] LinuxNextTracking branch, master, updated. next-20181120
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit f2be6d710d25be7d8d13f49f713d69dea9c71d57
Merge: bae4e109837b419b93fbddcb414c86673b1c90a5 f2ce1065e767fc7da106a5f5381d1e8f842dc6f4
Author: David S. Miller <davem(a)davemloft.net>
Date: Mon Nov 19 10:55:00 2018 -0800
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --combined MAINTAINERS
index 3bd775ba51ce,77b11742785d..68528f176875
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -717,7 -717,7 +717,7 @@@ F: include/linux/mfd/altera-a10sr.
F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
- M: Vince Bridgers <vbridger(a)opensource.altera.com>
+ M: Thor Thayer <thor.thayer(a)linux.intel.com>
L: netdev(a)vger.kernel.org
L: nios2-dev(a)lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
@@@ -3276,6 -3276,12 +3276,12 @@@ F: include/uapi/linux/caif
F: include/net/caif/
F: net/caif/
+ CAKE QDISC
+ M: Toke Høiland-Jørgensen <toke(a)toke.dk>
+ L: cake(a)lists.bufferbloat.net (moderated for non-subscribers)
+ S: Maintained
+ F: net/sched/sch_cake.c
+
CALGARY x86-64 IOMMU
M: Muli Ben-Yehuda <mulix(a)mulix.org>
M: Jon Mason <jdmason(a)kudzu.us>
@@@ -7850,6 -7856,13 +7856,6 @@@ F: include/linux/isdn
F: include/uapi/linux/isdn.h
F: include/uapi/linux/isdn/
-ISDN SUBSYSTEM (Eicon active card driver)
-M: Armin Schindler <mac(a)melware.de>
-L: isdn4linux(a)listserv.isdn4linux.de (subscribers-only)
-W: http://www.melware.de
-S: Maintained
-F: drivers/isdn/hardware/eicon/
-
IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare(a)suse.com>
L: linux-hwmon(a)vger.kernel.org
@@@ -10679,14 -10692,6 +10685,14 @@@ L: linux-nfc(a)lists.01.org (moderated fo
S: Supported
F: drivers/nfc/nxp-nci
+OBJAGG
+M: Jiri Pirko <jiri(a)mellanox.com>
+L: netdev(a)vger.kernel.org
+S: Supported
+F: lib/objagg.c
+F: lib/test_objagg.c
+F: include/linux/objagg.h
+
OBJTOOL
M: Josh Poimboeuf <jpoimboe(a)redhat.com>
M: Peter Zijlstra <peterz(a)infradead.org>
@@@ -10809,9 -10814,9 +10815,9 @@@ F: drivers/media/platform/omap3isp
F: drivers/staging/media/omap4iss/
OMAP MMC SUPPORT
- M: Jarkko Lavinen <jarkko.lavinen(a)nokia.com>
+ M: Aaro Koskinen <aaro.koskinen(a)iki.fi>
L: linux-omap(a)vger.kernel.org
- S: Maintained
+ S: Odd Fixes
F: drivers/mmc/host/omap.c
OMAP POWER MANAGEMENT SUPPORT
@@@ -11746,6 -11751,7 +11752,7 @@@ F: Documentation/devicetree/bindings/pi
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg(a)linux.intel.com>
M: Andy Shevchenko <andriy.shevchenko(a)linux.intel.com>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S: Maintained
F: drivers/pinctrl/intel/
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 956e708c777d,d49db46254cd..649bf7c586c1
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@@ -2295,8 -2295,6 +2295,8 @@@ static int cxgb_up(struct adapter *adap
static void cxgb_down(struct adapter *adapter)
{
+ struct hash_mac_addr *entry, *tmp;
+
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
@@@ -2305,12 -2303,6 +2305,12 @@@
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
adapter->flags &= ~FULL_INIT_DONE;
}
@@@ -5871,7 -5863,7 +5871,7 @@@ fw_attach_fail
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
- if (IS_ENABLED(CONFIG_THERMAL) &&
+ if (IS_REACHABLE(CONFIG_THERMAL) &&
!is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
cxgb4_thermal_init(adapter);
@@@ -5940,7 -5932,7 +5940,7 @@@ static void remove_one(struct pci_dev *
if (!is_t4(adapter->params.chip))
cxgb4_ptp_stop(adapter);
- if (IS_ENABLED(CONFIG_THERMAL))
+ if (IS_REACHABLE(CONFIG_THERMAL))
cxgb4_thermal_remove(adapter);
/* If we allocated filters, free up state associated with any
diff --combined drivers/net/tun.c
index 1e9da697081d,e244f5d7512a..56575f88d1fd
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -188,11 -188,6 +188,11 @@@ struct tun_file
struct xdp_rxq_info xdp_rxq;
};
+struct tun_page {
+ struct page *page;
+ int count;
+};
+
struct tun_flow_entry {
struct hlist_node hash_link;
struct rcu_head rcu;
@@@ -1478,22 -1473,23 +1478,22 @@@ static struct sk_buff *tun_napi_alloc_f
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
- struct page_frag *pfrag = &current->task_frag;
size_t fragsz = it->iov[i].iov_len;
+ struct page *page;
+ void *frag;
if (fragsz == 0 || fragsz > PAGE_SIZE) {
err = -EINVAL;
goto free;
}
-
- if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
+ frag = netdev_alloc_frag(fragsz);
+ if (!frag) {
err = -ENOMEM;
goto free;
}
-
- skb_fill_page_desc(skb, i - 1, pfrag->page,
- pfrag->offset, fragsz);
- page_ref_inc(pfrag->page);
- pfrag->offset += fragsz;
+ page = virt_to_head_page(frag);
+ skb_fill_page_desc(skb, i - 1, page,
+ frag - page_address(page), fragsz);
}
return skb;
@@@ -1540,6 -1536,7 +1540,7 @@@ static void tun_rx_batched(struct tun_s
if (!rx_batched || (!more && skb_queue_empty(queue))) {
local_bh_disable();
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
return;
@@@ -1559,8 -1556,11 +1560,11 @@@
struct sk_buff *nskb;
local_bh_disable();
- while ((nskb = __skb_dequeue(&process_queue)))
+ while ((nskb = __skb_dequeue(&process_queue))) {
+ skb_record_rx_queue(nskb, tfile->queue_index);
netif_receive_skb(nskb);
+ }
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
}
@@@ -2381,16 -2381,9 +2385,16 @@@ static void tun_sock_write_space(struc
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static void tun_put_page(struct tun_page *tpage)
+{
+ if (tpage->page)
+ __page_frag_cache_drain(tpage->page, tpage->count);
+}
+
static int tun_xdp_one(struct tun_struct *tun,
struct tun_file *tfile,
- struct xdp_buff *xdp, int *flush)
+ struct xdp_buff *xdp, int *flush,
+ struct tun_page *tpage)
{
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
struct virtio_net_hdr *gso = &hdr->gso;
@@@ -2401,7 -2394,6 +2405,7 @@@
int buflen = hdr->buflen;
int err = 0;
bool skb_xdp = false;
+ struct page *page;
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
@@@ -2428,14 -2420,7 +2432,14 @@@
case XDP_PASS:
break;
default:
- put_page(virt_to_head_page(xdp->data));
+ page = virt_to_head_page(xdp->data);
+ if (tpage->page == page) {
+ ++tpage->count;
+ } else {
+ tun_put_page(tpage);
+ tpage->page = page;
+ tpage->count = 1;
+ }
return 0;
}
}
@@@ -2467,10 -2452,10 +2471,11 @@@ build
goto out;
}
- if (!rcu_dereference(tun->steering_prog))
+ if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
+ !tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
stats = get_cpu_ptr(tun->pcpu_stats);
@@@ -2499,18 -2484,15 +2504,18 @@@ static int tun_sendmsg(struct socket *s
return -EBADFD;
if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ struct tun_page tpage;
int n = ctl->num;
int flush = 0;
+ memset(&tpage, 0, sizeof(tpage));
+
local_bh_disable();
rcu_read_lock();
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush);
+ tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
}
if (flush)
@@@ -2519,8 -2501,6 +2524,8 @@@
rcu_read_unlock();
local_bh_enable();
+ tun_put_page(&tpage);
+
ret = total_len;
goto out;
}
diff --combined net/bridge/br_private.h
index 67105c66584a,04c19a37e500..bc2653738fc3
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@@ -102,12 -102,18 +102,18 @@@ struct br_tunnel_info
struct metadata_dst *tunnel_dst;
};
+ /* private vlan flags */
+ enum {
+ BR_VLFLAG_PER_PORT_STATS = BIT(0),
+ };
+
/**
* struct net_bridge_vlan - per-vlan entry
*
* @vnode: rhashtable member
* @vid: VLAN id
* @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
* @stats: per-cpu VLAN statistics
* @br: if MASTER flag set, this points to a bridge struct
* @port: if MASTER flag unset, this points to a port struct
@@@ -127,6 -133,7 +133,7 @@@ struct net_bridge_vlan
struct rhash_head tnode;
u16 vid;
u16 flags;
+ u16 priv_flags;
struct br_vlan_stats __percpu *stats;
union {
struct net_bridge *br;
@@@ -905,7 -912,7 +912,7 @@@ static inline int br_vlan_get_tag(cons
int err = 0;
if (skb_vlan_tag_present(skb)) {
- *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+ *vid = skb_vlan_tag_get_id(skb);
} else {
*vid = 0;
err = -EINVAL;
diff --combined net/bridge/br_vlan.c
index a7e869da21bf,e84be08b8285..b21838b51220
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@@ -197,7 -197,7 +197,7 @@@ static void nbp_vlan_rcu_free(struct rc
v = container_of(rcu, struct net_bridge_vlan, rcu);
WARN_ON(br_vlan_is_master(v));
/* if we had per-port stats configured then free them here */
- if (v->brvlan->stats != v->stats)
+ if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
free_percpu(v->stats);
v->stats = NULL;
kfree(v);
@@@ -264,6 -264,7 +264,7 @@@ static int __vlan_add(struct net_bridge
err = -ENOMEM;
goto out_filt;
}
+ v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
} else {
v->stats = masterv->stats;
}
@@@ -420,7 -421,7 +421,7 @@@ struct sk_buff *br_handle_vlan(struct n
}
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
if (p && (p->flags & BR_VLAN_TUNNEL) &&
br_handle_egress_vlan_tunnel(skb, v)) {
@@@ -493,8 -494,8 +494,8 @@@ static bool __allowed_ingress(const str
__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
else
/* Priority-tagged Frame.
- * At this point, We know that skb->vlan_tci had
- * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+ * At this point, we know that skb->vlan_tci VID
+ * field was 0.
* We update only VID field and preserve PCP field.
*/
skb->vlan_tci |= pvid;
diff --combined net/core/dev.c
index 5927f6a7c301,066aa902d85c..f2bfd2eda7b2
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -3091,17 -3091,10 +3091,17 @@@ EXPORT_SYMBOL(__skb_gso_segment)
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev)
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
if (net_ratelimit()) {
pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ if (dev)
+ pr_err("dev features: %pNF\n", &dev->features);
+ pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
+ skb->len, skb->data_len, skb->pkt_type,
+ skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
+ skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
+ skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
dump_stack();
}
}
@@@ -4896,7 -4889,7 +4896,7 @@@ skip_classify
* and set skb->priority like in vlan_do_receive()
* For the time being, just ignore Priority Code Point
*/
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
type = skb->protocol;
@@@ -5393,9 -5386,7 +5393,9 @@@ static struct list_head *gro_list_prepa
}
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
- diffs |= p->vlan_tci ^ skb->vlan_tci;
+ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+ if (skb_vlan_tag_present(p))
+ diffs |= p->vlan_tci ^ skb->vlan_tci;
diffs |= skb_metadata_dst_cmp(p, skb);
diffs |= skb_metadata_differs(p, skb);
if (maclen == ETH_HLEN)
@@@ -5661,9 -5652,13 +5661,13 @@@ static void napi_reuse_skb(struct napi_
__skb_pull(skb, skb_headlen(skb));
/* restore the reserve we had after netdev_alloc_skb_ip_align() */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb->dev = napi->dev;
skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@@ -5788,7 -5783,7 +5792,7 @@@ __sum16 __skb_gro_checksum_complete(str
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
+ netdev_rx_csum_fault(skb->dev, skb);
}
NAPI_GRO_CB(skb)->csum = wsum;
diff --combined net/ipv4/ip_tunnel_core.c
index f45b96d715f0,c248e0dccbe1..c857ec6b9784
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@@ -80,7 -80,7 +80,7 @@@ void iptunnel_xmit(struct sock *sk, str
iph->version = 4;
iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = df;
+ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
iph->protocol = proto;
iph->tos = tos;
iph->daddr = dst;
@@@ -120,7 -120,7 +120,7 @@@ int __iptunnel_pull_header(struct sk_bu
}
skb_clear_hash_if_not_l4(skb);
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_set_queue_mapping(skb, 0);
skb_scrub_packet(skb, xnet);
diff --combined net/ipv6/route.c
index b2447b7c7303,059f0531f7c1..194bc162866d
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -2232,8 -2232,7 +2232,7 @@@ static void ip6_link_failure(struct sk_
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & RTF_CACHE) {
- if (dst_hold_safe(&rt->dst))
- rt6_remove_exception_rt(rt);
+ rt6_remove_exception_rt(rt);
} else {
struct fib6_info *from;
struct fib6_node *fn;
@@@ -2360,10 -2359,13 +2359,13 @@@ EXPORT_SYMBOL_GPL(ip6_update_pmtu)
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
+ int oif = sk->sk_bound_dev_if;
struct dst_entry *dst;
- ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+ if (!oif && skb->dev)
+ oif = l3mdev_master_ifindex(skb->dev);
+
+ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete ||
@@@ -2975,8 -2977,7 +2977,8 @@@ static struct fib6_info *ip6_route_info
if (!rt)
goto out;
- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+ rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+ extack);
if (IS_ERR(rt->fib6_metrics)) {
err = PTR_ERR(rt->fib6_metrics);
/* Do not leave garbage there. */
@@@ -3215,8 -3216,8 +3217,8 @@@ static int ip6_del_cached_rt(struct rt6
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
goto out;
- if (dst_hold_safe(&rt->dst))
- rc = rt6_remove_exception_rt(rt);
+
+ rc = rt6_remove_exception_rt(rt);
out:
return rc;
}
@@@ -3709,7 -3710,7 +3711,7 @@@ struct fib6_info *addrconf_f6i_alloc(st
if (!f6i)
return ERR_PTR(-ENOMEM);
- f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
+ f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0, NULL);
f6i->dst_nocount = true;
f6i->dst_host = true;
f6i->fib6_protocol = RTPROT_KERNEL;
diff --combined net/sched/sch_fq.c
index 3671eab91107,25a7cf6d380f..1da8864502d4
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@@ -94,7 -94,6 +94,7 @@@ struct fq_sched_data
u32 flow_refill_delay;
u32 flow_plimit; /* max packets per flow */
unsigned long flow_max_rate; /* optional max rate per flow */
+ u64 ce_threshold;
u32 orphan_mask; /* mask for orphaned skb */
u32 low_rate_threshold;
struct rb_root *fq_root;
@@@ -108,7 -107,6 +108,7 @@@
u64 stat_gc_flows;
u64 stat_internal_packets;
u64 stat_throttled;
+ u64 stat_ce_mark;
u64 stat_flows_plimit;
u64 stat_pkts_too_long;
u64 stat_allocation_errors;
@@@ -456,11 -454,6 +456,11 @@@ begin
fq_flow_set_throttled(q, f);
goto begin;
}
+ if (time_next_packet &&
+ (s64)(now - time_next_packet - q->ce_threshold) > 0) {
+ INET_ECN_set_ce(skb);
+ q->stat_ce_mark++;
+ }
}
skb = fq_dequeue_head(sch, f);
@@@ -476,22 -469,29 +476,29 @@@
goto begin;
}
prefetch(&skb->end);
- f->credit -= qdisc_pkt_len(skb);
+ plen = qdisc_pkt_len(skb);
+ f->credit -= plen;
- if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+ if (!q->rate_enable)
goto out;
rate = q->flow_max_rate;
- if (skb->sk)
- rate = min(skb->sk->sk_pacing_rate, rate);
-
- if (rate <= q->low_rate_threshold) {
- f->credit = 0;
- plen = qdisc_pkt_len(skb);
- } else {
- plen = max(qdisc_pkt_len(skb), q->quantum);
- if (f->credit > 0)
- goto out;
+
+ /* If EDT time was provided for this skb, we need to
+ * update f->time_next_packet only if this qdisc enforces
+ * a flow max rate.
+ */
+ if (!skb->tstamp) {
+ if (skb->sk)
+ rate = min(skb->sk->sk_pacing_rate, rate);
+
+ if (rate <= q->low_rate_threshold) {
+ f->credit = 0;
+ } else {
+ plen = max(plen, q->quantum);
+ if (f->credit > 0)
+ goto out;
+ }
}
if (rate != ~0UL) {
u64 len = (u64)plen * NSEC_PER_SEC;
@@@ -657,7 -657,6 +664,7 @@@ static const struct nla_policy fq_polic
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
+ [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
@@@ -737,10 -736,6 +744,10 @@@
if (tb[TCA_FQ_ORPHAN_MASK])
q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+ if (tb[TCA_FQ_CE_THRESHOLD])
+ q->ce_threshold = (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+
if (!err) {
sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
@@@ -791,10 -786,6 +798,10 @@@ static int fq_init(struct Qdisc *sch, s
q->fq_trees_log = ilog2(1024);
q->orphan_mask = 1024 - 1;
q->low_rate_threshold = 550000 / 8;
+
+ /* Default ce_threshold of 4294 seconds */
+ q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
+
qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
if (opt)
@@@ -808,7 -799,6 +815,7 @@@
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fq_sched_data *q = qdisc_priv(sch);
+ u64 ce_threshold = q->ce_threshold;
struct nlattr *opts;
opts = nla_nest_start(skb, TCA_OPTIONS);
@@@ -817,8 -807,6 +824,8 @@@
/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
+ do_div(ce_threshold, NSEC_PER_USEC);
+
if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
@@@ -831,7 -819,6 +838,7 @@@
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
q->low_rate_threshold) ||
+ nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
@@@ -861,7 -848,6 +868,7 @@@ static int fq_dump_stats(struct Qdisc *
st.throttled_flows = q->throttled_flows;
st.unthrottle_latency_ns = min_t(unsigned long,
q->unthrottle_latency_ns, ~0U);
+ st.ce_mark = q->stat_ce_mark;
sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20181119
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit d7d8bbb40a5b1f682ee6589e212934f4c6b8ad60
Author: Sven Eckelmann <sven(a)narfation.org>
Date: Wed Nov 7 23:09:12 2018 +0100
batman-adv: Expand merged fragment buffer for full packet
The complete size ("total_size") of the fragmented packet is stored in the
fragment header and in the size of the fragment chain. When the fragments
are ready for merge, the skbuff's tail of the first fragment is expanded to
have enough room after the data pointer for at least total_size. This means
that it gets expanded by total_size - first_skb->len.
But this is ignoring the fact that after expanding the buffer, the fragment
header is pulled from this buffer. Assuming that the tailroom of the
buffer was already 0, the buffer after the data pointer of the skbuff is
now only total_size - len(fragment_header) large. When the merge function
is then processing the remaining fragments, the code to copy the data over
to the merged skbuff will cause an skb_over_panic when it tries to actually
put enough data to fill the total_size bytes of the packet.
The size of the skb_pull must therefore also be taken into account when the
buffer's tailroom is expanded.
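A minimal userspace sketch of the tailroom arithmetic described above, using
made-up sizes; total_size, hdr_size and skb_out_len below are illustrative
stand-ins, not the kernel's actual fields:
#include <stdio.h>
int main(void)
{
        /* Illustrative sizes: total packet size from the fragment header,
         * size of the fragment header itself, and length of the first
         * fragment buffer (which still contains its fragment header).
         */
        unsigned int total_size = 1400;
        unsigned int hdr_size = 20;
        unsigned int skb_out_len = 600;
        /* Old expansion: room left after pulling the header is too small. */
        unsigned int old_tailroom = total_size - skb_out_len;
        /* Fixed expansion: account for the header that will be pulled. */
        unsigned int new_tailroom = total_size + hdr_size - skb_out_len;
        printf("old: %u bytes after pull, need %u\n",
               skb_out_len + old_tailroom - hdr_size, total_size);
        printf("new: %u bytes after pull, need %u\n",
               skb_out_len + new_tailroom - hdr_size, total_size);
        return 0;
}
With the old calculation the buffer comes up hdr_size bytes short once the
fragment header is pulled; the fixed calculation leaves exactly total_size
bytes of room.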
Fixes: 610bfc6bc99b ("batman-adv: Receive fragmented packets and merge")
Reported-by: Martin Weinelt <martin(a)darmstadt.freifunk.net>
Co-authored-by: Linus Lüssing <linus.luessing(a)c0d3.blue>
Signed-off-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: Simon Wunderlich <sw(a)simonwunderlich.de>
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0fddc17106bd..5b71a289d04f 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
kfree(entry);
packet = (struct batadv_frag_packet *)skb_out->data;
- size = ntohs(packet->total_size);
+ size = ntohs(packet->total_size) + hdr_size;
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20181119
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 32848c71f0bf18a8478453e6566467843e2fd58a
Merge: 72bb42a2e1bd5aad7ea0e87ab8396d894c72857e cddaf02bcb7313a23b06e46683a1381b85840687
Author: Stephen Rothwell <sfr(a)canb.auug.org.au>
Date: Mon Nov 19 11:27:15 2018 +1100
Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS
index d0c567d39f3b,3bd775ba51ce..6a79dc9f3502
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -717,7 -717,7 +717,7 @@@ F: include/linux/mfd/altera-a10sr.
F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Vince Bridgers <vbridger(a)opensource.altera.com>
+M: Thor Thayer <thor.thayer(a)linux.intel.com>
L: netdev(a)vger.kernel.org
L: nios2-dev(a)lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
@@@ -2869,6 -2869,7 +2869,6 @@@ F: arch/mips/include/asm/mach-bcm47xx/
BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke(a)hauke-m.de>
M: Rafał Miłecki <zajec5(a)gmail.com>
-M: Jon Mason <jonmason(a)broadcom.com>
M: bcm-kernel-feedback-list(a)broadcom.com
L: linux-arm-kernel(a)lists.infradead.org
S: Maintained
@@@ -3014,6 -3015,7 +3014,6 @@@ F: drivers/net/ethernet/broadcom/genet
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui(a)broadcom.com>
M: Scott Branden <sbranden(a)broadcom.com>
-M: Jon Mason <jonmason(a)broadcom.com>
M: bcm-kernel-feedback-list(a)broadcom.com
L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/broadcom/cygnus-linux.git
@@@ -3274,12 -3276,6 +3274,12 @@@ F: include/uapi/linux/caif
F: include/net/caif/
F: net/caif/
+CAKE QDISC
+M: Toke Høiland-Jørgensen <toke(a)toke.dk>
+L: cake(a)lists.bufferbloat.net (moderated for non-subscribers)
+S: Maintained
+F: net/sched/sch_cake.c
+
CALGARY x86-64 IOMMU
M: Muli Ben-Yehuda <mulix(a)mulix.org>
M: Jon Mason <jdmason(a)kudzu.us>
@@@ -6193,7 -6189,6 +6193,7 @@@ T: git git://git.kernel.org/pub/scm/lin
S: Supported
F: drivers/phy/
F: include/linux/phy/
+F: Documentation/devicetree/bindings/phy/
GENERIC PINCTRL I2C DEMULTIPLEXER DRIVER
M: Wolfram Sang <wsa+renesas(a)sang-engineering.com>
@@@ -6994,23 -6989,6 +6994,23 @@@ L: linux-i2c(a)vger.kernel.or
S: Maintained
F: drivers/i2c/i2c-stub.c
+I3C SUBSYSTEM
+M: Boris Brezillon <boris.brezillon(a)bootlin.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-i3c
+F: Documentation/devicetree/bindings/i3c/
+F: Documentation/driver-api/i3c
+F: drivers/i3c/
+F: include/linux/i3c/
+F: include/dt-bindings/i3c/
+
+I3C DRIVER FOR SYNOPSYS DESIGNWARE
+M: Vitor Soares <vitor.soares(a)synopsys.com>
+S: Maintained
+F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
+F: drivers/i3c/master/dw*
+
IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck(a)intel.com>
M: Fenghua Yu <fenghua.yu(a)intel.com>
@@@ -7872,13 -7850,6 +7872,6 @@@ F: include/linux/isdn
F: include/uapi/linux/isdn.h
F: include/uapi/linux/isdn/
- ISDN SUBSYSTEM (Eicon active card driver)
- M: Armin Schindler <mac(a)melware.de>
- L: isdn4linux(a)listserv.isdn4linux.de (subscribers-only)
- W: http://www.melware.de
- S: Maintained
- F: drivers/isdn/hardware/eicon/
-
IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare(a)suse.com>
L: linux-hwmon(a)vger.kernel.org
@@@ -10708,6 -10679,14 +10701,14 @@@ L: linux-nfc(a)lists.01.org (moderated fo
S: Supported
F: drivers/nfc/nxp-nci
+ OBJAGG
+ M: Jiri Pirko <jiri(a)mellanox.com>
+ L: netdev(a)vger.kernel.org
+ S: Supported
+ F: lib/objagg.c
+ F: lib/test_objagg.c
+ F: include/linux/objagg.h
+
OBJTOOL
M: Josh Poimboeuf <jpoimboe(a)redhat.com>
M: Peter Zijlstra <peterz(a)infradead.org>
@@@ -10830,9 -10809,9 +10831,9 @@@ F: drivers/media/platform/omap3isp
F: drivers/staging/media/omap4iss/
OMAP MMC SUPPORT
-M: Jarkko Lavinen <jarkko.lavinen(a)nokia.com>
+M: Aaro Koskinen <aaro.koskinen(a)iki.fi>
L: linux-omap(a)vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: drivers/mmc/host/omap.c
OMAP POWER MANAGEMENT SUPPORT
@@@ -11767,7 -11746,6 +11768,7 @@@ F: Documentation/devicetree/bindings/pi
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg(a)linux.intel.com>
M: Andy Shevchenko <andriy.shevchenko(a)linux.intel.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S: Maintained
F: drivers/pinctrl/intel/
@@@ -12397,13 -12375,6 +12398,13 @@@ L: linux-arm-msm(a)vger.kernel.or
S: Maintained
F: drivers/iommu/qcom_iommu.c
+QUALCOMM TSENS THERMAL DRIVER
+M: Amit Kucheria <amit.kucheria(a)linaro.org>
+L: linux-pm(a)vger.kernel.org
+L: linux-arm-msm(a)vger.kernel.org
+S: Maintained
+F: drivers/thermal/qcom/
+
QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
M: Stanimir Varbanov <stanimir.varbanov(a)linaro.org>
L: linux-media(a)vger.kernel.org
@@@ -13868,14 -13839,6 +13869,14 @@@ S: Maintaine
F: drivers/ssb/
F: include/linux/ssb/
+SONY IMX214 SENSOR DRIVER
+M: Ricardo Ribalda <ricardo.ribalda(a)gmail.com>
+L: linux-media(a)vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx214.c
+F: Documentation/devicetree/bindings/media/i2c/sony,imx214.txt
+
SONY IMX258 SENSOR DRIVER
M: Sakari Ailus <sakari.ailus(a)linux.intel.com>
L: linux-media(a)vger.kernel.org
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d49db46254cd,956e708c777d..649bf7c586c1
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@@ -2295,6 -2295,8 +2295,8 @@@ static int cxgb_up(struct adapter *adap
static void cxgb_down(struct adapter *adapter)
{
+ struct hash_mac_addr *entry, *tmp;
+
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
@@@ -2303,6 -2305,12 +2305,12 @@@
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
adapter->flags &= ~FULL_INIT_DONE;
}
@@@ -5863,7 -5871,7 +5871,7 @@@ fw_attach_fail
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
- if (IS_ENABLED(CONFIG_THERMAL) &&
+ if (IS_REACHABLE(CONFIG_THERMAL) &&
!is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
cxgb4_thermal_init(adapter);
@@@ -5932,7 -5940,7 +5940,7 @@@ static void remove_one(struct pci_dev *
if (!is_t4(adapter->params.chip))
cxgb4_ptp_stop(adapter);
- if (IS_ENABLED(CONFIG_THERMAL))
+ if (IS_REACHABLE(CONFIG_THERMAL))
cxgb4_thermal_remove(adapter);
/* If we allocated filters, free up state associated with any
diff --combined lib/Kconfig.debug
index 60750a72cbe2,b3c91b9e32f8..f27f20e45547
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@@ -211,23 -211,6 +211,23 @@@ config GDB_SCRIPT
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
+config NO_AUTO_INLINE
+ bool "Disable compiler auto-inline optimizations"
+ help
+ This will prevent the compiler from optimizing the kernel by
+ auto-inlining functions not marked with the inline keyword.
+ With this option, only functions explicitly marked with
+ "inline" will be inlined. This will allow the function tracer
+ to trace more functions because it only traces functions that
+ the compiler has not inlined.
+
+ Enabling this function can help debugging a kernel if using
+ the function tracer. But it can also change how the kernel
+ works, because inlining functions may change the timing,
+ which could make it difficult while debugging race conditions.
+
+ If unsure, select N.
+
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
@@@ -1993,6 -1976,16 +1993,16 @@@ config TEST_MEMCAT_
If unsure, say N.
+ config TEST_OBJAGG
+ tristate "Perform selftest on object aggreration manager"
+ default n
+ depends on OBJAGG
+ help
+ Enable this option to test object aggregation manager on boot
+ (or module load).
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --combined net/bridge/br_private.h
index 04c19a37e500,67105c66584a..bc2653738fc3
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@@ -102,18 -102,12 +102,18 @@@ struct br_tunnel_info
struct metadata_dst *tunnel_dst;
};
+/* private vlan flags */
+enum {
+ BR_VLFLAG_PER_PORT_STATS = BIT(0),
+};
+
/**
* struct net_bridge_vlan - per-vlan entry
*
* @vnode: rhashtable member
* @vid: VLAN id
* @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
* @stats: per-cpu VLAN statistics
* @br: if MASTER flag set, this points to a bridge struct
* @port: if MASTER flag unset, this points to a port struct
@@@ -133,7 -127,6 +133,7 @@@ struct net_bridge_vlan
struct rhash_head tnode;
u16 vid;
u16 flags;
+ u16 priv_flags;
struct br_vlan_stats __percpu *stats;
union {
struct net_bridge *br;
@@@ -912,7 -905,7 +912,7 @@@ static inline int br_vlan_get_tag(cons
int err = 0;
if (skb_vlan_tag_present(skb)) {
- *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+ *vid = skb_vlan_tag_get_id(skb);
} else {
*vid = 0;
err = -EINVAL;
diff --combined net/bridge/br_vlan.c
index e84be08b8285,a7e869da21bf..b21838b51220
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@@ -197,7 -197,7 +197,7 @@@ static void nbp_vlan_rcu_free(struct rc
v = container_of(rcu, struct net_bridge_vlan, rcu);
WARN_ON(br_vlan_is_master(v));
/* if we had per-port stats configured then free them here */
- if (v->brvlan->stats != v->stats)
+ if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
free_percpu(v->stats);
v->stats = NULL;
kfree(v);
@@@ -264,7 -264,6 +264,7 @@@ static int __vlan_add(struct net_bridge
err = -ENOMEM;
goto out_filt;
}
+ v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
} else {
v->stats = masterv->stats;
}
@@@ -421,7 -420,7 +421,7 @@@ struct sk_buff *br_handle_vlan(struct n
}
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
if (p && (p->flags & BR_VLAN_TUNNEL) &&
br_handle_egress_vlan_tunnel(skb, v)) {
@@@ -494,8 -493,8 +494,8 @@@ static bool __allowed_ingress(const str
__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
else
/* Priority-tagged Frame.
- * At this point, We know that skb->vlan_tci had
- * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+ * At this point, we know that skb->vlan_tci VID
+ * field was 0.
* We update only VID field and preserve PCP field.
*/
skb->vlan_tci |= pvid;
diff --combined net/core/dev.c
index 066aa902d85c,5927f6a7c301..f2bfd2eda7b2
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -3091,10 -3091,17 +3091,17 @@@ EXPORT_SYMBOL(__skb_gso_segment)
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
- void netdev_rx_csum_fault(struct net_device *dev)
+ void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
if (net_ratelimit()) {
pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ if (dev)
+ pr_err("dev features: %pNF\n", &dev->features);
+ pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
+ skb->len, skb->data_len, skb->pkt_type,
+ skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
+ skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
+ skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
dump_stack();
}
}
@@@ -4889,7 -4896,7 +4896,7 @@@ skip_classify
* and set skb->priority like in vlan_do_receive()
* For the time being, just ignore Priority Code Point
*/
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
type = skb->protocol;
@@@ -5386,7 -5393,9 +5393,9 @@@ static struct list_head *gro_list_prepa
}
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
- diffs |= p->vlan_tci ^ skb->vlan_tci;
+ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+ if (skb_vlan_tag_present(p))
+ diffs |= p->vlan_tci ^ skb->vlan_tci;
diffs |= skb_metadata_dst_cmp(p, skb);
diffs |= skb_metadata_differs(p, skb);
if (maclen == ETH_HLEN)
@@@ -5652,13 -5661,9 +5661,13 @@@ static void napi_reuse_skb(struct napi_
__skb_pull(skb, skb_headlen(skb));
/* restore the reserve we had after netdev_alloc_skb_ip_align() */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb->dev = napi->dev;
skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@@ -5783,7 -5788,7 +5792,7 @@@ __sum16 __skb_gro_checksum_complete(str
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
+ netdev_rx_csum_fault(skb->dev, skb);
}
NAPI_GRO_CB(skb)->csum = wsum;
diff --combined net/core/filter.c
index 9a1327eb25fa,10acbc00ff6c..e462da7235e4
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -296,22 -296,18 +296,18 @@@ static u32 convert_skb_access(int skb_f
break;
case SKF_AD_VLAN_TAG:
- case SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
offsetof(struct sk_buff, vlan_tci));
- if (skb_field == SKF_AD_VLAN_TAG) {
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
- ~VLAN_TAG_PRESENT);
- } else {
- /* dst_reg >>= 12 */
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
- /* dst_reg &= 1 */
+ break;
+ case SKF_AD_VLAN_TAG_PRESENT:
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
- }
break;
}
@@@ -4852,17 -4848,18 +4848,17 @@@ static struct sock *sk_lookup(struct ne
} else {
struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
- u16 hnum = ntohs(tuple->ipv6.dport);
int sdif = inet6_sdif(skb);
if (proto == IPPROTO_TCP)
sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
src6, tuple->ipv6.sport,
- dst6, hnum,
+ dst6, ntohs(tuple->ipv6.dport),
dif, sdif, &refcounted);
else if (likely(ipv6_bpf_stub))
sk = ipv6_bpf_stub->udp6_lib_lookup(net,
src6, tuple->ipv6.sport,
- dst6, hnum,
+ dst6, tuple->ipv6.dport,
dif, sdif,
&udp_table, skb);
#endif
@@@ -6139,19 -6136,19 +6135,19 @@@ static u32 bpf_convert_ctx_access(enum
break;
case offsetof(struct __sk_buff, vlan_present):
- case offsetof(struct __sk_buff, vlan_tci):
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+ *target_size = 1;
+ *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+ PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+ break;
+ case offsetof(struct __sk_buff, vlan_tci):
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
bpf_target_off(struct sk_buff, vlan_tci, 2,
target_size));
- if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
- *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
- ~VLAN_TAG_PRESENT);
- } else {
- *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
- *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
- }
break;
case offsetof(struct __sk_buff, cb[0]) ...
diff --combined net/ipv4/ip_tunnel_core.c
index c248e0dccbe1,f45b96d715f0..c857ec6b9784
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@@ -80,7 -80,7 +80,7 @@@ void iptunnel_xmit(struct sock *sk, str
iph->version = 4;
iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = df;
+ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
iph->protocol = proto;
iph->tos = tos;
iph->daddr = dst;
@@@ -120,7 -120,7 +120,7 @@@ int __iptunnel_pull_header(struct sk_bu
}
skb_clear_hash_if_not_l4(skb);
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_set_queue_mapping(skb, 0);
skb_scrub_packet(skb, xnet);
diff --combined net/ipv6/route.c
index 14b422f35504,b2447b7c7303..30706fc8d42a
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -2232,7 -2232,8 +2232,7 @@@ static void ip6_link_failure(struct sk_
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & RTF_CACHE) {
- if (dst_hold_safe(&rt->dst))
- rt6_remove_exception_rt(rt);
+ rt6_remove_exception_rt(rt);
} else {
struct fib6_info *from;
struct fib6_node *fn;
@@@ -2974,7 -2975,8 +2974,8 @@@ static struct fib6_info *ip6_route_info
if (!rt)
goto out;
- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+ rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+ extack);
if (IS_ERR(rt->fib6_metrics)) {
err = PTR_ERR(rt->fib6_metrics);
/* Do not leave garbage there. */
@@@ -3213,8 -3215,8 +3214,8 @@@ static int ip6_del_cached_rt(struct rt6
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
goto out;
- if (dst_hold_safe(&rt->dst))
- rc = rt6_remove_exception_rt(rt);
+
+ rc = rt6_remove_exception_rt(rt);
out:
return rc;
}
@@@ -3707,7 -3709,7 +3708,7 @@@ struct fib6_info *addrconf_f6i_alloc(st
if (!f6i)
return ERR_PTR(-ENOMEM);
- f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
+ f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0, NULL);
f6i->dst_nocount = true;
f6i->dst_host = true;
f6i->fib6_protocol = RTPROT_KERNEL;
diff --combined net/sched/sch_fq.c
index 25a7cf6d380f,3671eab91107..1da8864502d4
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@@ -94,6 -94,7 +94,7 @@@ struct fq_sched_data
u32 flow_refill_delay;
u32 flow_plimit; /* max packets per flow */
unsigned long flow_max_rate; /* optional max rate per flow */
+ u64 ce_threshold;
u32 orphan_mask; /* mask for orphaned skb */
u32 low_rate_threshold;
struct rb_root *fq_root;
@@@ -107,6 -108,7 +108,7 @@@
u64 stat_gc_flows;
u64 stat_internal_packets;
u64 stat_throttled;
+ u64 stat_ce_mark;
u64 stat_flows_plimit;
u64 stat_pkts_too_long;
u64 stat_allocation_errors;
@@@ -454,6 -456,11 +456,11 @@@ begin
fq_flow_set_throttled(q, f);
goto begin;
}
+ if (time_next_packet &&
+ (s64)(now - time_next_packet - q->ce_threshold) > 0) {
+ INET_ECN_set_ce(skb);
+ q->stat_ce_mark++;
+ }
}
skb = fq_dequeue_head(sch, f);
@@@ -469,29 -476,22 +476,29 @@@
goto begin;
}
prefetch(&skb->end);
- f->credit -= qdisc_pkt_len(skb);
+ plen = qdisc_pkt_len(skb);
+ f->credit -= plen;
- if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+ if (!q->rate_enable)
goto out;
rate = q->flow_max_rate;
- if (skb->sk)
- rate = min(skb->sk->sk_pacing_rate, rate);
-
- if (rate <= q->low_rate_threshold) {
- f->credit = 0;
- plen = qdisc_pkt_len(skb);
- } else {
- plen = max(qdisc_pkt_len(skb), q->quantum);
- if (f->credit > 0)
- goto out;
+
+ /* If EDT time was provided for this skb, we need to
+ * update f->time_next_packet only if this qdisc enforces
+ * a flow max rate.
+ */
+ if (!skb->tstamp) {
+ if (skb->sk)
+ rate = min(skb->sk->sk_pacing_rate, rate);
+
+ if (rate <= q->low_rate_threshold) {
+ f->credit = 0;
+ } else {
+ plen = max(plen, q->quantum);
+ if (f->credit > 0)
+ goto out;
+ }
}
if (rate != ~0UL) {
u64 len = (u64)plen * NSEC_PER_SEC;
@@@ -657,6 -657,7 +664,7 @@@ static const struct nla_policy fq_polic
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
+ [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
@@@ -736,6 -737,10 +744,10 @@@
if (tb[TCA_FQ_ORPHAN_MASK])
q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+ if (tb[TCA_FQ_CE_THRESHOLD])
+ q->ce_threshold = (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+
if (!err) {
sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
@@@ -786,6 -791,10 +798,10 @@@ static int fq_init(struct Qdisc *sch, s
q->fq_trees_log = ilog2(1024);
q->orphan_mask = 1024 - 1;
q->low_rate_threshold = 550000 / 8;
+
+ /* Default ce_threshold of 4294 seconds */
+ q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
+
qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
if (opt)
@@@ -799,6 -808,7 +815,7 @@@
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fq_sched_data *q = qdisc_priv(sch);
+ u64 ce_threshold = q->ce_threshold;
struct nlattr *opts;
opts = nla_nest_start(skb, TCA_OPTIONS);
@@@ -807,6 -817,8 +824,8 @@@
/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
+ do_div(ce_threshold, NSEC_PER_USEC);
+
if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
@@@ -819,6 -831,7 +838,7 @@@
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
q->low_rate_threshold) ||
+ nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
@@@ -848,6 -861,7 +868,7 @@@ static int fq_dump_stats(struct Qdisc *
st.throttled_flows = q->throttled_flows;
st.unthrottle_latency_ns = min_t(unsigned long,
q->unthrottle_latency_ns, ~0U);
+ st.ce_mark = q->stat_ce_mark;
sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20181119
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit f4156f9656feac21f4de712fac94fae964c5d402
Author: Sven Eckelmann <sven(a)narfation.org>
Date: Tue Oct 30 12:17:10 2018 +0100
batman-adv: Use explicit tvlv padding for ELP packets
The announcement messages of batman-adv COMPAT_VERSION 15 have the
possibility to announce additional information via a dynamic TVLV part.
This part is optional for the ELP packets and currently not parsed by the
Linux implementation. Still out-of-tree versions are using it to transport
things like neighbor hashes to optimize the rebroadcast behavior.
Since the ELP broadcast packets are smaller than the minimal ethernet
packet, it often has to be padded. This is often done (as specified in
RFC894) with octets of zero and thus work perfectly fine with the TVLV
part (making it a zero length and thus empty). But not all ethernet
compatible hardware seems to follow this advice. To avoid ambiguous
situations when parsing the TVLV header, just force the 4 bytes (TVLV
length + padding) after the required ELP header to zero.
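For illustration, a simplified userspace sketch of the layout the fix
enforces: the (empty) TVLV area right after the ELP header is zeroed
explicitly instead of being left to whatever padding the hardware appends.
The struct below is a hypothetical stand-in, not the kernel's
batadv_elp_packet definition:
#include <stdint.h>
#include <stdio.h>
#include <string.h>
/* Stand-in for the ELP header layout (illustrative only). */
struct elp_hdr {
        uint8_t  packet_type;
        uint8_t  version;
        uint8_t  orig[6];
        uint32_t seqno;
        uint32_t elp_interval;
};
int main(void)
{
        const size_t tvlv_padding = sizeof(uint32_t); /* TVLV length + padding */
        uint8_t buf[sizeof(struct elp_hdr) + sizeof(uint32_t)];
        /* Zero the header *and* the four bytes after it, so a receiver that
         * parses the optional TVLV section always sees a TVLV length of 0.
         */
        memset(buf, 0, sizeof(struct elp_hdr) + tvlv_padding);
        printf("buffer: %zu bytes, trailing %zu bytes kept as zeroed TVLV area\n",
               sizeof(buf), tvlv_padding);
        return 0;
}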
Fixes: d6f94d91f766 ("batman-adv: ELP - adding basic infrastructure")
Reported-by: Linus Lüssing <linus.luessing(a)c0d3.blue>
Signed-off-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: Simon Wunderlich <sw(a)simonwunderlich.de>
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 9f481cfdf77d..e8090f099eb8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -352,19 +352,21 @@ out:
*/
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
{
+ static const size_t tvlv_padding = sizeof(__be32);
struct batadv_elp_packet *elp_packet;
unsigned char *elp_buff;
u32 random_seqno;
size_t size;
int res = -ENOMEM;
- size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+ size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
if (!hard_iface->bat_v.elp_skb)
goto out;
skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
- elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+ elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
+ BATADV_ELP_HLEN + tvlv_padding);
elp_packet = (struct batadv_elp_packet *)elp_buff;
elp_packet->packet_type = BATADV_ELP;
--
LinuxNextTracking
4 years, 2 months
Build check errors found: 2018-11-19
by postmaster@open-mesh.org
Name of failed tests
====================
master
------
* difference between net-next and batadv master
Output of different failed tests
================================
master: difference between net-next and batadv master
-----------------------------------------------------
netnext/net/batman-adv/bat_v_elp.c | 6 ++----
netnext/net/batman-adv/fragmentation.c | 2 +-
2 files changed, 3 insertions(+), 5 deletions(-)
Statistics
==========
master
------
Failed tests: 1
Started build tests: 53
Tested Linux versions: 23
Tested configs: 47
maint
-----
Failed tests: 0
Started build tests: 54
Tested Linux versions: 26
Tested configs: 50
Build check errors found: 2018-11-18
by postmaster@open-mesh.org
Name of failed tests
====================
master
------
* difference between net-next and batadv master
Output of different failed tests
================================
master: difference between net-next and batadv master
-----------------------------------------------------
netnext/net/batman-adv/bat_v_elp.c | 6 ++----
netnext/net/batman-adv/fragmentation.c | 2 +-
2 files changed, 3 insertions(+), 5 deletions(-)
Statistics
==========
master
------
Failed tests: 1
Started build tests: 53
Tested Linux versions: 20
Tested configs: 50
maint
-----
Failed tests: 0
Started build tests: 54
Tested Linux versions: 22
Tested configs: 45
Build check errors found: 2018-11-17
by postmaster@open-mesh.org
Name of failed tests
====================
maint
-----
* difference between net and batadv maint
master
------
* difference between net-next and batadv master
Output of different failed tests
================================
maint: difference between net and batadv maint
----------------------------------------------
net/net/batman-adv/bat_v_elp.c | 6 ++----
net/net/batman-adv/fragmentation.c | 2 +-
2 files changed, 3 insertions(+), 5 deletions(-)
master: difference between net-next and batadv master
-----------------------------------------------------
netnext/net/batman-adv/bat_v_elp.c | 6 ++----
netnext/net/batman-adv/fragmentation.c | 2 +-
2 files changed, 3 insertions(+), 5 deletions(-)
Statistics
==========
maint
-----
Failed tests: 1
Started build tests: 52
Tested Linux versions: 23
Tested configs: 48
master
------
Failed tests: 1
Started build tests: 51
Tested Linux versions: 24
Tested configs: 48
[linux-next] LinuxNextTracking branch, master, updated. next-20181116
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 016fd285682952b943641c074d1cc0d02b3e6889
Author: Linus Lüssing <linus.luessing(a)c0d3.blue>
Date: Tue Nov 6 10:01:50 2018 +0100
batman-adv: enable MCAST by default at compile time
Thanks to rigorous testing in wireless community mesh networks several
issues with multicast entries in the translation table were found and
fixed in the last 1.5 years. Now we see the first larger networks
(a few hundred nodes) with a batman-adv version with multicast
optimizations enabled arising, with no TT / multicast optimization
related issues so far.
Therefore it seems safe to enable multicast optimizations by default.
Signed-off-by: Linus Lüssing <linus.luessing(a)c0d3.blue>
Signed-off-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: Simon Wunderlich <sw(a)simonwunderlich.de>
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index d6b94559f888..c386e6981416 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -82,6 +82,7 @@ config BATMAN_ADV_NC
config BATMAN_ADV_MCAST
bool "Multicast optimisation"
depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
+ default y
help
This option enables the multicast optimisation which aims to
reduce the air overhead while improving the reliability of
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20181116
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit fb939135a6cf77a26831d23e6d22e4b9602cfce7
Author: Sven Eckelmann <sven(a)narfation.org>
Date: Sun Oct 14 17:16:14 2018 +0200
batman-adv: Move CRC16 dependency to BATMAN_ADV_BLA
The commit ced72933a5e8 ("batman-adv: use CRC32C instead of CRC16 in TT
code") switched the translation table code from crc16 to crc32c. The
(optional) bridge loop avoidance code is the only user of this function.
batman-adv should only select CRC16 when it is actually using it.
Signed-off-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: Simon Wunderlich <sw(a)simonwunderlich.de>
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 082e96060bc2..d6b94559f888 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -22,7 +22,6 @@
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
depends on NET
- select CRC16
select LIBCRC32C
help
B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
@@ -48,6 +47,7 @@ config BATMAN_ADV_BATMAN_V
config BATMAN_ADV_BLA
bool "Bridge Loop Avoidance"
depends on BATMAN_ADV && INET
+ select CRC16
default y
help
This option enables BLA (Bridge Loop Avoidance), a mechanism
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20181116
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 6b7b40aad5cd2d7b59fbbd60537ce2eaea2f980d
Author: Sven Eckelmann <sven(a)narfation.org>
Date: Tue Oct 30 22:01:29 2018 +0100
batman-adv: Add inconsistent local TT netlink dump detection
The netlink dump functionality transfers a large number of entries from the
kernel to userspace. It is rather likely that the transfer has to
interrupted and later continued. During that time, it can happen that
either new entries are added or removed. The userspace could than either
receive some entries multiple times or miss entries.
Commit 670dc2833d14 ("netlink: advertise incomplete dumps") introduced a
mechanism to inform userspace about this problem. Userspace can then decide
whether it is necessary or not to retry dumping the information again.
The netlink dump functions have to be switched to exclusive locks to avoid
changes while the current message is prepared. The already existing
generation sequence counter from the hash helper can be used for this
purpose.
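Stripped of the netlink plumbing, the mechanism is a generation counter that
writers bump and dumpers compare between batches; a minimal userspace sketch
with hypothetical names:
#include <stdbool.h>
#include <stdio.h>
/* Hypothetical table with a generation counter (illustrative only). */
struct table {
        unsigned int generation; /* bumped on every add/remove */
        int entries;
};
static void table_add(struct table *t)
{
        t->entries++;
        t->generation++;
}
/* Dump one batch; returns true if the table changed underneath us, which a
 * real netlink dump would report to userspace via the NLM_F_DUMP_INTR flag.
 */
static bool dump_batch(const struct table *t, unsigned int *seq)
{
        bool inconsistent = (*seq != 0 && *seq != t->generation);
        *seq = t->generation; /* remember what this batch was based on */
        return inconsistent;
}
int main(void)
{
        struct table t = { .generation = 1, .entries = 0 };
        unsigned int seq = 0;
        dump_batch(&t, &seq);          /* first batch */
        table_add(&t);                 /* concurrent modification */
        if (dump_batch(&t, &seq))      /* second batch notices the change */
                printf("dump was interrupted by a change, userspace may retry\n");
        return 0;
}
In the kernel this corresponds to genl_dump_check_consistent() comparing
cb->seq against the hash generation, as the diff below does.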
Reported-by: Matthias Schiffer <mschiffer(a)universe-factory.net>
Signed-off-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: Simon Wunderlich <sw(a)simonwunderlich.de>
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d21624c44665..8dcd4968cde7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1145,14 +1145,15 @@ out:
* batadv_tt_local_dump_entry() - Dump one TT local entry into a message
* @msg :Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @common: tt local & tt global common data
*
* Return: Error code, or 0 on success
*/
static int
-batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_tt_common_entry *common)
{
@@ -1173,12 +1174,14 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
batadv_softif_vlan_put(vlan);
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI,
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_TRANSTABLE_LOCAL);
if (!hdr)
return -ENOBUFS;
+ genl_dump_check_consistent(cb, hdr);
+
if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
@@ -1201,34 +1204,39 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
* batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
- * @head: Pointer to the list containing the local tt entries
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
* @idx_s: Number of entries to skip
*
* Return: Error code, or 0 on success
*/
static int
-batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv,
- struct hlist_head *head, int *idx_s)
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_s)
{
struct batadv_tt_common_entry *common;
int idx = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(common, head, hash_entry) {
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_s)
continue;
- if (batadv_tt_local_dump_entry(msg, portid, seq, bat_priv,
+ if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv,
common)) {
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
*idx_s = 0;
return 0;
@@ -1248,7 +1256,6 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_hashtable *hash;
- struct hlist_head *head;
int ret;
int ifindex;
int bucket = cb->args[0];
@@ -1276,10 +1283,8 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
hash = bat_priv->tt.local_hash;
while (bucket < hash->size) {
- head = &hash->table[bucket];
-
- if (batadv_tt_local_dump_bucket(msg, portid, cb->nlh->nlmsg_seq,
- bat_priv, head, &idx))
+ if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv,
+ hash, bucket, &idx))
break;
bucket++;
--
LinuxNextTracking