The following commit has been merged in the master branch:
commit f2be6d710d25be7d8d13f49f713d69dea9c71d57
Merge: bae4e109837b419b93fbddcb414c86673b1c90a5 f2ce1065e767fc7da106a5f5381d1e8f842dc6f4
Author: David S. Miller <davem@davemloft.net>
Date:   Mon Nov 19 10:55:00 2018 -0800
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --combined MAINTAINERS
index 3bd775ba51ce,77b11742785d..68528f176875
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -717,7 -717,7 +717,7 @@@ F:	include/linux/mfd/altera-a10sr.
F:	include/dt-bindings/reset/altr,rst-mgr-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
- M:	Vince Bridgers <vbridger@opensource.altera.com>
+ M:	Thor Thayer <thor.thayer@linux.intel.com>
L:	netdev@vger.kernel.org
L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S:	Maintained
@@@ -3276,6 -3276,12 +3276,12 @@@ F:	include/uapi/linux/caif
F:	include/net/caif/
F:	net/caif/
+ CAKE QDISC
+ M:	Toke Høiland-Jørgensen <toke@toke.dk>
+ L:	cake@lists.bufferbloat.net (moderated for non-subscribers)
+ S:	Maintained
+ F:	net/sched/sch_cake.c
+
CALGARY x86-64 IOMMU
M:	Muli Ben-Yehuda <mulix@mulix.org>
M:	Jon Mason <jdmason@kudzu.us>
@@@ -7850,6 -7856,13 +7856,6 @@@ F:	include/linux/isdn
F:	include/uapi/linux/isdn.h
F:	include/uapi/linux/isdn/
-ISDN SUBSYSTEM (Eicon active card driver)
-M:	Armin Schindler <mac@melware.de>
-L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
-W:	http://www.melware.de
-S:	Maintained
-F:	drivers/isdn/hardware/eicon/
-
IT87 HARDWARE MONITORING DRIVER
M:	Jean Delvare <jdelvare@suse.com>
L:	linux-hwmon@vger.kernel.org
@@@ -10679,14 -10692,6 +10685,14 @@@ L:	linux-nfc@lists.01.org (moderated fo
S:	Supported
F:	drivers/nfc/nxp-nci
+OBJAGG
+M:	Jiri Pirko <jiri@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	lib/objagg.c
+F:	lib/test_objagg.c
+F:	include/linux/objagg.h
+
OBJTOOL
M:	Josh Poimboeuf <jpoimboe@redhat.com>
M:	Peter Zijlstra <peterz@infradead.org>
@@@ -10809,9 -10814,9 +10815,9 @@@ F:	drivers/media/platform/omap3isp
F:	drivers/staging/media/omap4iss/
OMAP MMC SUPPORT
- M:	Jarkko Lavinen <jarkko.lavinen@nokia.com>
+ M:	Aaro Koskinen <aaro.koskinen@iki.fi>
L:	linux-omap@vger.kernel.org
- S:	Maintained
+ S:	Odd Fixes
F:	drivers/mmc/host/omap.c
OMAP POWER MANAGEMENT SUPPORT
@@@ -11746,6 -11751,7 +11752,7 @@@ F:	Documentation/devicetree/bindings/pi
PIN CONTROLLER - INTEL
M:	Mika Westerberg <mika.westerberg@linux.intel.com>
M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S:	Maintained
F:	drivers/pinctrl/intel/
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 956e708c777d,d49db46254cd..649bf7c586c1
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@@ -2295,8 -2295,6 +2295,8 @@@ static int cxgb_up(struct adapter *adap
static void cxgb_down(struct adapter *adapter)
{
+	struct hash_mac_addr *entry, *tmp;
+
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
@@@ -2305,12 -2303,6 +2305,12 @@@
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
+
+	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
	adapter->flags &= ~FULL_INIT_DONE;
}
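[For readers who don't have the list API in their head: the _safe variant is what makes the free-while-walking loop above legal. Roughly, simplified from include/linux/list.h, it expands to a for-loop that caches the next node in "tmp" before the body runs:

	for (entry = list_first_entry(&adapter->mac_hlist,
				      struct hash_mac_addr, list),
	     tmp = list_next_entry(entry, list);
	     &entry->list != &adapter->mac_hlist;
	     entry = tmp, tmp = list_next_entry(tmp, list))
		/* body may list_del() and kfree() "entry" freely */;

A plain list_for_each_entry() would dereference the just-freed node to advance the cursor.]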
@@@ -5871,7 -5863,7 +5871,7 @@@ fw_attach_fail
	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);
-	if (IS_ENABLED(CONFIG_THERMAL) &&
+	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
		cxgb4_thermal_init(adapter);
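[The IS_ENABLED() -> IS_REACHABLE() switch matters when cxgb4 is built in while CONFIG_THERMAL=m: IS_ENABLED() is true for both =y and =m, so built-in cxgb4 code would emit calls into thermal symbols that only exist in a module and fail to link. A hedged sketch of the distinction, simplified from include/linux/kconfig.h; THERMAL_REACHABLE is a made-up name for illustration. Kconfig defines CONFIG_THERMAL for =y, CONFIG_THERMAL_MODULE for =m, and MODULE only while compiling modular code:

	#if defined(CONFIG_THERMAL)
	#define THERMAL_REACHABLE 1	/* thermal built in: always linkable */
	#elif defined(CONFIG_THERMAL_MODULE) && defined(MODULE)
	#define THERMAL_REACHABLE 1	/* both sides modular: symbols resolve */
	#else
	#define THERMAL_REACHABLE 0	/* thermal=m, caller=y: link error */
	#endif
]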
@@@ -5940,7 -5932,7 +5940,7 @@@ static void remove_one(struct pci_dev *
	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_stop(adapter);
-	if (IS_ENABLED(CONFIG_THERMAL))
+	if (IS_REACHABLE(CONFIG_THERMAL))
		cxgb4_thermal_remove(adapter);
	/* If we allocated filters, free up state associated with any

diff --combined drivers/net/tun.c
index 1e9da697081d,e244f5d7512a..56575f88d1fd
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -188,11 -188,6 +188,11 @@@ struct tun_file
	struct xdp_rxq_info xdp_rxq;
};
+struct tun_page {
+	struct page *page;
+	int count;
+};
+
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
@@@ -1478,22 -1473,23 +1478,22 @@@ static struct sk_buff *tun_napi_alloc_f
	skb->truesize += skb->data_len;
	for (i = 1; i < it->nr_segs; i++) {
-		struct page_frag *pfrag = &current->task_frag;
		size_t fragsz = it->iov[i].iov_len;
+		struct page *page;
+		void *frag;
		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
-
-		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
+		frag = netdev_alloc_frag(fragsz);
+		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
-
-		skb_fill_page_desc(skb, i - 1, pfrag->page,
-				   pfrag->offset, fragsz);
-		page_ref_inc(pfrag->page);
-		pfrag->offset += fragsz;
+		page = virt_to_head_page(frag);
+		skb_fill_page_desc(skb, i - 1, page,
+				   frag - page_address(page), fragsz);
	}
	return skb;
@@@ -1540,6 -1536,7 +1540,7 @@@ static void tun_rx_batched(struct tun_s
	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
+		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
@@@ -1559,8 -1556,11 +1560,11 @@@
		struct sk_buff *nskb;
		local_bh_disable();
-		while ((nskb = __skb_dequeue(&process_queue)))
+		while ((nskb = __skb_dequeue(&process_queue))) {
+			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
+		}
+		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
@@@ -2381,16 -2381,9 +2385,16 @@@ static void tun_sock_write_space(struc
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static void tun_put_page(struct tun_page *tpage)
+{
+	if (tpage->page)
+		__page_frag_cache_drain(tpage->page, tpage->count);
+}
+
static int tun_xdp_one(struct tun_struct *tun,
		       struct tun_file *tfile,
-		       struct xdp_buff *xdp, int *flush)
+		       struct xdp_buff *xdp, int *flush,
+		       struct tun_page *tpage)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
@@@ -2401,7 -2394,6 +2405,7 @@@
	int buflen = hdr->buflen;
	int err = 0;
	bool skb_xdp = false;
+	struct page *page;
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
@@@ -2428,14 -2420,7 +2432,14 @@@
		case XDP_PASS:
			break;
		default:
-			put_page(virt_to_head_page(xdp->data));
+			page = virt_to_head_page(xdp->data);
+			if (tpage->page == page) {
+				++tpage->count;
+			} else {
+				tun_put_page(tpage);
+				tpage->page = page;
+				tpage->count = 1;
+			}
			return 0;
		}
	}
@@@ -2467,10 -2452,10 +2471,11 @@@ build
		goto out;
	}
-	if (!rcu_dereference(tun->steering_prog))
+	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
+	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);
+	skb_record_rx_queue(skb, tfile->queue_index);
	netif_receive_skb(skb);
	stats = get_cpu_ptr(tun->pcpu_stats);
@@@ -2499,18 -2484,15 +2504,18 @@@ static int tun_sendmsg(struct socket *s
		return -EBADFD;
	if (ctl && (ctl->type == TUN_MSG_PTR)) {
+		struct tun_page tpage;
		int n = ctl->num;
		int flush = 0;
+		memset(&tpage, 0, sizeof(tpage));
+
		local_bh_disable();
		rcu_read_lock();
		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
-			tun_xdp_one(tun, tfile, xdp, &flush);
+			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
		}
		if (flush)
@@@ -2519,8 -2501,6 +2524,8 @@@
		rcu_read_unlock();
		local_bh_enable();
+		tun_put_page(&tpage);
+
		ret = total_len;
		goto out;
	}

diff --combined net/bridge/br_private.h
index 67105c66584a,04c19a37e500..bc2653738fc3
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@@ -102,12 -102,18 +102,18 @@@ struct br_tunnel_info
	struct metadata_dst *tunnel_dst;
};
+ /* private vlan flags */
+ enum {
+ 	BR_VLFLAG_PER_PORT_STATS = BIT(0),
+ };
+
/**
 * struct net_bridge_vlan - per-vlan entry
 *
 * @vnode: rhashtable member
 * @vid: VLAN id
 * @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
 * @stats: per-cpu VLAN statistics
 * @br: if MASTER flag set, this points to a bridge struct
 * @port: if MASTER flag unset, this points to a port struct
@@@ -127,6 -133,7 +133,7 @@@ struct net_bridge_vlan
	struct rhash_head tnode;
	u16 vid;
	u16 flags;
+	u16 priv_flags;
	struct br_vlan_stats __percpu *stats;
	union {
		struct net_bridge *br;
@@@ -905,7 -912,7 +912,7 @@@ static inline int br_vlan_get_tag(cons
	int err = 0;
	if (skb_vlan_tag_present(skb)) {
-		*vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+		*vid = skb_vlan_tag_get_id(skb);
	} else {
		*vid = 0;
		err = -EINVAL;

diff --combined net/bridge/br_vlan.c
index a7e869da21bf,e84be08b8285..b21838b51220
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@@ -197,7 -197,7 +197,7 @@@ static void nbp_vlan_rcu_free(struct rc
	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
-	if (v->brvlan->stats != v->stats)
+	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
@@@ -264,6 -264,7 +264,7 @@@ static int __vlan_add(struct net_bridge
			err = -ENOMEM;
			goto out_filt;
		}
+		v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
	} else {
		v->stats = masterv->stats;
	}
@@@ -420,7 -421,7 +421,7 @@@ struct sk_buff *br_handle_vlan(struct n
	}
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
@@@ -493,8 -494,8 +494,8 @@@ static bool __allowed_ingress(const str
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
-			 * At this point, We know that skb->vlan_tci had
-			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+			 * At this point, we know that skb->vlan_tci VID
+			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

diff --combined net/core/dev.c
index 5927f6a7c301,066aa902d85c..f2bfd2eda7b2
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -3091,17 -3091,10 +3091,17 @@@ EXPORT_SYMBOL(__skb_gso_segment)
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev)
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+		if (dev)
+			pr_err("dev features: %pNF\n", &dev->features);
+		pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
+		       skb->len, skb->data_len, skb->pkt_type,
+		       skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
+		       skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
+		       skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
		dump_stack();
	}
}
@@@ -4896,7 -4889,7 +4896,7 @@@ skip_classify
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
-		skb->vlan_tci = 0;
+		__vlan_hwaccel_clear_tag(skb);
	}
	type = skb->protocol;
@@@ -5393,9 -5386,7 +5393,9 @@@ static struct list_head *gro_list_prepa
	}
		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
-		diffs |= p->vlan_tci ^ skb->vlan_tci;
+		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+		if (skb_vlan_tag_present(p))
+			diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
@@@ -5661,9 -5652,13 +5661,13 @@@ static void napi_reuse_skb(struct napi_
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
-	skb->vlan_tci = 0;
+	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;
+
+	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
+	skb->pkt_type = PACKET_HOST;
+
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@@ -5788,7 -5783,7 +5792,7 @@@ __sum16 __skb_gro_checksum_complete(str
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
-			netdev_rx_csum_fault(skb->dev);
+			netdev_rx_csum_fault(skb->dev, skb);
	}
	NAPI_GRO_CB(skb)->csum = wsum;

diff --combined net/ipv4/ip_tunnel_core.c
index f45b96d715f0,c248e0dccbe1..c857ec6b9784
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@@ -80,7 -80,7 +80,7 @@@ void iptunnel_xmit(struct sock *sk, str
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
-	iph->frag_off = df;
+	iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = dst;
@@@ -120,7 -120,7 +120,7 @@@ int __iptunnel_pull_header(struct sk_bu
	}
	skb_clear_hash_if_not_l4(skb);
-	skb->vlan_tci = 0;
+	__vlan_hwaccel_clear_tag(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);
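[This is the third file in the merge converting open-coded "skb->vlan_tci = 0" to __vlan_hwaccel_clear_tag() (after br_vlan.c and dev.c). The conversion supports moving the tag-present flag out of vlan_tci; note the comment fix in br_vlan.c above dropping the mention of VLAN_TAG_PRESENT. A hedged sketch of the helper's shape (the exact body in include/linux/if_vlan.h is assumed here, not quoted):

	static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
	{
		/* presence now lives in its own skb bit; zeroing vlan_tci
		 * alone would leave the skb looking tagged with VID 0 */
		skb->vlan_present = 0;
	}
]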
diff --combined net/ipv6/route.c
index b2447b7c7303,059f0531f7c1..194bc162866d
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -2232,8 -2232,7 +2232,7 @@@ static void ip6_link_failure(struct sk_
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
-			if (dst_hold_safe(&rt->dst))
-				rt6_remove_exception_rt(rt);
+			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;
@@@ -2360,10 -2359,13 +2359,13 @@@ EXPORT_SYMBOL_GPL(ip6_update_pmtu)
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
+	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;
-	ip6_update_pmtu(skb, sock_net(sk), mtu,
-			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+	if (!oif && skb->dev)
+		oif = l3mdev_master_ifindex(skb->dev);
+
+	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
@@@ -2975,8 -2977,7 +2977,8 @@@ static struct fib6_info *ip6_route_info
	if (!rt)
		goto out;
-	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+					       extack);
	if (IS_ERR(rt->fib6_metrics)) {
		err = PTR_ERR(rt->fib6_metrics);
		/* Do not leave garbage there. */
@@@ -3215,8 -3216,8 +3217,8 @@@ static int ip6_del_cached_rt(struct rt6
	if (cfg->fc_flags & RTF_GATEWAY &&
	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
		goto out;
-	if (dst_hold_safe(&rt->dst))
-		rc = rt6_remove_exception_rt(rt);
+
+	rc = rt6_remove_exception_rt(rt);
out:
	return rc;
}
@@@ -3709,7 -3710,7 +3711,7 @@@ struct fib6_info *addrconf_f6i_alloc(st
	if (!f6i)
		return ERR_PTR(-ENOMEM);
-	f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
+	f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0, NULL);
	f6i->dst_nocount = true;
	f6i->dst_host = true;
	f6i->fib6_protocol = RTPROT_KERNEL;

diff --combined net/sched/sch_fq.c
index 3671eab91107,25a7cf6d380f..1da8864502d4
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@@ -94,7 -94,6 +94,7 @@@ struct fq_sched_data
	u32	flow_refill_delay;
	u32	flow_plimit;		/* max packets per flow */
	unsigned long	flow_max_rate;	/* optional max rate per flow */
+	u64	ce_threshold;
	u32	orphan_mask;		/* mask for orphaned skb */
	u32	low_rate_threshold;
	struct rb_root	*fq_root;
@@@ -108,7 -107,6 +108,7 @@@
	u64	stat_gc_flows;
	u64	stat_internal_packets;
	u64	stat_throttled;
+	u64	stat_ce_mark;
	u64	stat_flows_plimit;
	u64	stat_pkts_too_long;
	u64	stat_allocation_errors;
@@@ -456,11 -454,6 +456,11 @@@ begin
			fq_flow_set_throttled(q, f);
			goto begin;
		}
+		if (time_next_packet &&
+		    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
+			INET_ECN_set_ce(skb);
+			q->stat_ce_mark++;
+		}
	}
	skb = fq_dequeue_head(sch, f);
@@@ -476,22 -469,29 +476,29 @@@
		goto begin;
	}
	prefetch(&skb->end);
-	f->credit -= qdisc_pkt_len(skb);
+	plen = qdisc_pkt_len(skb);
+	f->credit -= plen;
-	if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+	if (!q->rate_enable)
		goto out;
	rate = q->flow_max_rate;
-	if (skb->sk)
-		rate = min(skb->sk->sk_pacing_rate, rate);
-
-	if (rate <= q->low_rate_threshold) {
-		f->credit = 0;
-		plen = qdisc_pkt_len(skb);
-	} else {
-		plen = max(qdisc_pkt_len(skb), q->quantum);
-		if (f->credit > 0)
-			goto out;
+
+	/* If EDT time was provided for this skb, we need to
+	 * update f->time_next_packet only if this qdisc enforces
+	 * a flow max rate.
+	 */
+	if (!skb->tstamp) {
+		if (skb->sk)
+			rate = min(skb->sk->sk_pacing_rate, rate);
+
+		if (rate <= q->low_rate_threshold) {
+			f->credit = 0;
+		} else {
+			plen = max(plen, q->quantum);
+			if (f->credit > 0)
+				goto out;
+		}
	}
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;
@@@ -657,7 -657,6 +664,7 @@@ static const struct nla_policy fq_polic
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
+	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
@@@ -737,10 -736,6 +744,10 @@@
	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+	if (tb[TCA_FQ_CE_THRESHOLD])
+		q->ce_threshold = (u64)NSEC_PER_USEC *
+				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+
	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
@@@ -791,10 -786,6 +798,10 @@@ static int fq_init(struct Qdisc *sch, s
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;
+
+	/* Default ce_threshold of 4294 seconds */
+	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;
+
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
	if (opt)
@@@ -808,7 -799,6 +815,7 @@@
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
+	u64 ce_threshold = q->ce_threshold;
	struct nlattr *opts;
	opts = nla_nest_start(skb, TCA_OPTIONS);
@@@ -817,8 -807,6 +824,8 @@@
/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
+	do_div(ce_threshold, NSEC_PER_USEC);
+
	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
@@@ -831,7 -819,6 +838,7 @@@
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
+	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;
@@@ -861,7 -848,6 +868,7 @@@ static int fq_dump_stats(struct Qdisc *
	st.throttled_flows	 = q->throttled_flows;
	st.unthrottle_latency_ns = min_t(unsigned long,
					 q->unthrottle_latency_ns, ~0U);
+	st.ce_mark		 = q->stat_ce_mark;
	sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
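[To summarize the new fq knob threaded through the hunks above: ce_threshold is kept internally in nanoseconds but exchanged over netlink in microseconds as a u32, hence the NSEC_PER_USEC multiply in fq_change() and the do_div() in fq_dump(). The default set in fq_init() is the largest value userspace could express, which effectively disables CE marking until configured. A small standalone check of the arithmetic (userspace C, for illustration only):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_USEC 1000ULL

	int main(void)
	{
		/* default from fq_init(): (u64)NSEC_PER_USEC * ~0U */
		uint64_t def_ns = NSEC_PER_USEC * 0xFFFFFFFFULL;

		/* prints 4294967295000 ns = 4294 s, matching the
		 * "Default ce_threshold of 4294 seconds" comment */
		printf("%llu ns = %llu s\n",
		       (unsigned long long)def_ns,
		       (unsigned long long)(def_ns / 1000000000ULL));
		return 0;
	}
]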
linux-merge@lists.open-mesh.org