The following commit has been merged in the master branch:

commit d247b6ab3ce6dd43665780865ec5fa145d9ab6bd
Merge: 30f00847953e3aa3f710d62ffd37b42042807900 4d8fdc95c60e90d84c8257a0067ff4b1729a3757
Author: David S. Miller <davem@davemloft.net>
Date:   Tue Aug 5 18:46:26 2014 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/Makefile
	net/ipv6/sysctl_net_ipv6.c
Two ipv6_table_template[] additions overlap, so the index of the ipv6_table[x] assignments needed to be adjusted.
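For reference, the resolved per-net wiring ends up as in the sketch below (abbreviated from the net/ipv6/sysctl_net_ipv6.c hunk later in this mail; only the index shift is the point):

    /* Both branches appended one entry to ipv6_table_template[], so the
     * per-net data pointers must be rewired by position: auto_flowlabels
     * takes slot 3 and fwmark_reflect slides down to slot 4.
     */
    ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
    ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
    ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
    ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
    ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;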
In the drivers/net/Makefile case, we've gotten rid of the garbage whereby we had to list every single USB networking driver in the top-level Makefile; now there is just one "USB_NETWORKING" option that guards everything.
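The resulting Makefile pattern is roughly the following sketch (the guard symbol name is taken from the commit text above and may not match the exact Kconfig symbol in the tree):

    # Before: one obj-$(CONFIG_...) entry per USB NIC driver, e.g.
    #   obj-$(CONFIG_USB_CATC)    += usb/
    #   obj-$(CONFIG_USB_PEGASUS) += usb/
    #   ...
    # After: a single guard symbol pulls in the whole usb/ subdirectory
    obj-$(CONFIG_USB_NETWORKING) += usb/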
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0fdbcc8,3187bc0..59846da
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@@ -427,17 -427,16 +427,17 @@@ static int qlcnic_fdb_add(struct ndmsg
  }

  static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
- 			   struct net_device *netdev, int idx)
+ 			   struct net_device *netdev,
+ 			   struct net_device *filter_dev, int idx)
  {
  	struct qlcnic_adapter *adapter = netdev_priv(netdev);

  	if (!adapter->fdb_mac_learn)
- 		return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+ 		return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);

  	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
  	    qlcnic_sriov_check(adapter))
- 		idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+ 		idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);

  	return idx;
  }

@@@ -2324,14 -2323,14 +2324,14 @@@ qlcnic_setup_netdev(struct qlcnic_adapt
  	if (err)
  		return err;
+ 	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+ 
  	err = register_netdev(netdev);
  	if (err) {
  		dev_err(&pdev->dev, "failed to register net device\n");
  		return err;
  	}

- 	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
- 
  	return 0;
  }

@@@ -2624,13 -2623,13 +2624,13 @@@ qlcnic_probe(struct pci_dev *pdev, cons
  	if (err)
  		goto err_out_disable_mbx_intr;

+ 	if (adapter->portnum == 0)
+ 		qlcnic_set_drv_version(adapter);
+ 
  	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
  	if (err)
  		goto err_out_disable_mbx_intr;

- 	if (adapter->portnum == 0)
- 		qlcnic_set_drv_version(adapter);
- 
  	pci_set_drvdata(pdev, adapter);

  	if (qlcnic_82xx_check(adapter))
@@@ -2981,43 -2980,17 +2981,43 @@@ static inline void dump_tx_ring_desc(st
  	}
  }

- static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+ static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
  {
+ 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  	struct net_device *netdev = adapter->netdev;
+ 	struct qlcnic_host_rds_ring *rds_ring;
+ 	struct qlcnic_host_sds_ring *sds_ring;
  	struct qlcnic_host_tx_ring *tx_ring;
  	int ring;
  	if (!netdev || !netif_running(netdev))
  		return;

+ 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ 		rds_ring = &recv_ctx->rds_rings[ring];
+ 		if (!rds_ring)
+ 			continue;
+ 		netdev_info(netdev,
+ 			    "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n",
+ 			    ring, readl(rds_ring->crb_rcv_producer),
+ 			    rds_ring->producer, rds_ring->num_desc);
+ 	}
+ 
+ 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ 		sds_ring = &(recv_ctx->sds_rings[ring]);
+ 		if (!sds_ring)
+ 			continue;
+ 		netdev_info(netdev,
+ 			    "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n",
+ 			    ring, readl(sds_ring->crb_sts_consumer),
+ 			    sds_ring->consumer, readl(sds_ring->crb_intr_mask),
+ 			    sds_ring->num_desc);
+ 	}
+ 
  	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
  		tx_ring = &adapter->tx_ring[ring];
+ 		if (!tx_ring)
+ 			continue;
  		netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
  			    ring, tx_ring->ctx_id);
  		netdev_info(netdev,
@@@ -3040,10 -3013,9 +3040,10 @@@
  		netdev_info(netdev,
  			    "Total desc=%d, Available desc=%d\n",
  			    tx_ring->num_desc, qlcnic_tx_avail(tx_ring));

- 		if (netif_msg_tx_done(adapter->ahw))
+ 		if (netif_msg_tx_err(adapter->ahw))
  			dump_tx_ring_desc(tx_ring);
  	}
+ }

  static void qlcnic_tx_timeout(struct net_device *netdev)
@@@ -3053,18 -3025,16 +3053,18 @@@
  	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
  		return;

- 	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
- 		netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+ 	qlcnic_dump_rings(adapter);
+ 
+ 	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS ||
+ 	    netif_msg_tx_err(adapter->ahw)) {
+ 		netdev_err(netdev, "Tx timeout, reset the adapter.\n");
  		if (qlcnic_82xx_check(adapter))
  			adapter->need_fw_reset = 1;
  		else if (qlcnic_83xx_check(adapter))
  			qlcnic_83xx_idc_request_reset(adapter,
  						      QLCNIC_FORCE_FW_DUMP_KEY);
  	} else {
- 		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
- 		qlcnic_dump_tx_rings(adapter);
+ 		netdev_err(netdev, "Tx timeout, reset adapter context.\n");
  		adapter->ahw->reset_context = 1;
  	}
  }

diff --combined net/bridge/br_fdb.c
index 0bb9d8b,02359e8..6f6c95c
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@@ -93,7 -93,7 +93,7 @@@ static void fdb_rcu_free(struct rcu_hea
  static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
  {
  	int err;
- 	struct net_bridge_port *p, *tmp;
+ 	struct net_bridge_port *p;
ASSERT_RTNL();
@@@ -107,9 -107,11 +107,9 @@@
  	return;

  undo:
- 	list_for_each_entry(tmp, &br->port_list, list) {
- 		if (tmp == p)
- 			break;
- 		if (!br_promisc_port(tmp))
- 			dev_uc_del(tmp->dev, addr);
+ 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+ 		if (!br_promisc_port(p))
+ 			dev_uc_del(p->dev, addr);
  	}
  }
@@@ -629,7 -631,7 +629,7 @@@ static int fdb_fill_info(struct sk_buf
  	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
  		goto nla_put_failure;

- 	if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+ 	if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
  		goto nla_put_failure;

  	return nlmsg_end(skb, nlh);
@@@ -676,7 -678,6 +676,7 @@@ errout
  int br_fdb_dump(struct sk_buff *skb,
  		struct netlink_callback *cb,
  		struct net_device *dev,
+ 		struct net_device *filter_dev,
  		int idx)
  {
  	struct net_bridge *br = netdev_priv(dev);
@@@ -692,19 -693,6 +692,19 @@@
  			if (idx < cb->args[0])
  				goto skip;
+ 			if (filter_dev &&
+ 			    (!f->dst || f->dst->dev != filter_dev)) {
+ 				if (filter_dev != dev)
+ 					goto skip;
+ 				/* !f->dst is a special case for the bridge:
+ 				 * it means the MAC belongs to the bridge itself,
+ 				 * so we need a little more filtering;
+ 				 * we only want to dump the !f->dst case.
+ 				 */
+ 				if (f->dst)
+ 					goto skip;
+ 			}
+ 
  			if (fdb_fill_info(skb, br, f,
  					  NETLINK_CB(cb->skb).portid,
  					  cb->nlh->nlmsg_seq,

diff --combined net/core/skbuff.c
index 3dec029,58ff88e..224506a6
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -2976,9 -2976,9 +2976,9 @@@ struct sk_buff *skb_segment(struct sk_b
  		tail = nskb;
  		__copy_skb_header(nskb, head_skb);
- 		nskb->mac_len = head_skb->mac_len;

  		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+ 		skb_reset_mac_len(nskb);

  		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
  						 nskb->data - tnl_hlen,
@@@ -3490,10 -3490,10 +3490,10 @@@
  }
  EXPORT_SYMBOL(sock_queue_err_skb);

- void skb_tstamp_tx(struct sk_buff *orig_skb,
- 		   struct skb_shared_hwtstamps *hwtstamps)
+ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ 		     struct skb_shared_hwtstamps *hwtstamps,
+ 		     struct sock *sk, int tstype)
  {
- 	struct sock *sk = orig_skb->sk;
  	struct sock_exterr_skb *serr;
  	struct sk_buff *skb;
  	int err;
@@@ -3521,26 -3521,12 +3521,26 @@@
  	memset(serr, 0, sizeof(*serr));
  	serr->ee.ee_errno = ENOMSG;
  	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+ 	serr->ee.ee_info = tstype;
+ 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+ 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
+ 		if (sk->sk_protocol == IPPROTO_TCP)
+ 			serr->ee.ee_data -= sk->sk_tskey;
+ 	}
err = sock_queue_err_skb(sk, skb);
  	if (err)
  		kfree_skb(skb);
  }
+ EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
+ 
+ void skb_tstamp_tx(struct sk_buff *orig_skb,
+ 		   struct skb_shared_hwtstamps *hwtstamps)
+ {
+ 	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
+ 			       SCM_TSTAMP_SND);
+ }
  EXPORT_SYMBOL_GPL(skb_tstamp_tx);

  void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)

diff --combined net/ipv4/ip_tunnel.c
index dd8c8c7,45920d9..afed1aa
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@@ -69,23 -69,25 +69,25 @@@ static unsigned int ip_tunnel_hash(__be
  }

  static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
- 			     struct dst_entry *dst)
+ 			     struct dst_entry *dst, __be32 saddr)
  {
  	struct dst_entry *old_dst;

  	dst_clone(dst);
  	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
  	dst_release(old_dst);
+ 	idst->saddr = saddr;
  }

- static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+ static void tunnel_dst_set(struct ip_tunnel *t,
+ 			   struct dst_entry *dst, __be32 saddr)
  {
- 	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
+ 	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
  }
  static void tunnel_dst_reset(struct ip_tunnel *t)
  {
- 	tunnel_dst_set(t, NULL);
+ 	tunnel_dst_set(t, NULL, 0);
  }

  void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
@@@ -93,20 -95,25 +95,25 @@@
  	int i;

  	for_each_possible_cpu(i)
- 		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+ 		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
  }
  EXPORT_SYMBOL(ip_tunnel_dst_reset_all);

- static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
+ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
+ 					u32 cookie, __be32 *saddr)
  {
+ 	struct ip_tunnel_dst *idst;
  	struct dst_entry *dst;

  	rcu_read_lock();
- 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+ 	idst = this_cpu_ptr(t->dst_cache);
+ 	dst = rcu_dereference(idst->dst);
  	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
  		dst = NULL;
  	if (dst) {
- 		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ 		if (!dst->obsolete || dst->ops->check(dst, cookie)) {
+ 			*saddr = idst->saddr;
+ 		} else {
  			tunnel_dst_reset(t);
  			dst_release(dst);
  			dst = NULL;
@@@ -305,7 -312,7 +312,7 @@@ static struct net_device *__ip_tunnel_c
  	}

  	ASSERT_RTNL();
- 	dev = alloc_netdev(ops->priv_size, name, ops->setup);
+ 	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
  	if (!dev) {
  		err = -ENOMEM;
  		goto failed;
@@@ -367,7 -374,7 +374,7 @@@ static int ip_tunnel_bind_dev(struct ne

  		if (!IS_ERR(rt)) {
  			tdev = rt->dst.dev;
- 			tunnel_dst_set(tunnel, &rt->dst);
+ 			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
  			ip_rt_put(rt);
  		}
  		if (dev->type != ARPHRD_ETHER)
@@@ -610,7 -617,7 +617,7 @@@ void ip_tunnel_xmit(struct sk_buff *skb
  	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
  			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
- 	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
+ 	rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;

  	if (!rt) {
  		rt = ip_route_output_key(tunnel->net, &fl4);
@@@ -620,7 -627,7 +627,7 @@@
  			goto tx_error;
  		}
  		if (connected)
- 			tunnel_dst_set(tunnel, &rt->dst);
+ 			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
  	}

  	if (rt->dst.dev == dev) {

diff --combined net/ipv6/sysctl_net_ipv6.c
index 5bf7b61,8183346..0c56c93
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@@ -39,13 -39,6 +39,13 @@@ static struct ctl_table ipv6_table_temp
  		.proc_handler	= proc_dointvec
  	},
  	{
+ 		.procname	= "auto_flowlabels",
+ 		.data		= &init_net.ipv6.sysctl.auto_flowlabels,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec
+ 	},
+ 	{
  		.procname	= "fwmark_reflect",
  		.data		= &init_net.ipv6.sysctl.fwmark_reflect,
  		.maxlen		= sizeof(int),
@@@ -81,7 -74,7 +81,8 @@@ static int __net_init ipv6_sysctl_net_i
  	ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
  	ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
  	ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
- 	ipv6_table[3].data = &net->ipv6.sysctl.fwmark_reflect;
+ 	ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
++	ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;

  	ipv6_route_table = ipv6_route_sysctl_init(net);
  	if (!ipv6_route_table)

diff --combined net/netfilter/nf_tables_api.c
index 93692d6,b35ba83..b8035c2
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@@ -2247,7 -2247,80 +2247,7 @@@ err
  	return err;
  }

- static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
- 				     struct netlink_callback *cb)
- {
- 	const struct nft_set *set;
- 	unsigned int idx = 0, s_idx = cb->args[0];
- 
- 	if (cb->args[1])
- 		return skb->len;
- 
- 	rcu_read_lock();
- 	cb->seq = ctx->net->nft.base_seq;
- 
- 	list_for_each_entry_rcu(set, &ctx->table->sets, list) {
- 		if (idx < s_idx)
- 			goto cont;
- 		if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
- 				       NLM_F_MULTI) < 0) {
- 			cb->args[0] = idx;
- 			goto done;
- 		}
- 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
- cont:
- 		idx++;
- 	}
- 	cb->args[1] = 1;
- done:
- 	rcu_read_unlock();
- 	return skb->len;
- }
- 
- static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
- 				      struct netlink_callback *cb)
- {
- 	const struct nft_set *set;
- 	unsigned int idx, s_idx = cb->args[0];
- 	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
- 
- 	if (cb->args[1])
- 		return skb->len;
- 
- 	rcu_read_lock();
- 	cb->seq = ctx->net->nft.base_seq;
- 
- 	list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
- 		if (cur_table) {
- 			if (cur_table != table)
- 				continue;
- 
- 			cur_table = NULL;
- 		}
- 		ctx->table = table;
- 		idx = 0;
- 		list_for_each_entry_rcu(set, &ctx->table->sets, list) {
- 			if (idx < s_idx)
- 				goto cont;
- 			if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
- 					       NLM_F_MULTI) < 0) {
- 				cb->args[0] = idx;
- 				cb->args[2] = (unsigned long) table;
- 				goto done;
- 			}
- 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
- cont:
- 			idx++;
- 		}
- 	}
- 	cb->args[1] = 1;
- done:
- 	rcu_read_unlock();
- 	return skb->len;
- }
- 
- static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
- 				   struct netlink_callback *cb)
+ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
  {
  	const struct nft_set *set;
  	unsigned int idx, s_idx = cb->args[0];
@@@ -2255,7 -2328,6 +2255,7 @@@
  	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
  	struct net *net = sock_net(skb->sk);
  	int cur_family = cb->args[3];
+ 	struct nft_ctx *ctx = cb->data, ctx_set;
  	if (cb->args[1])
  		return skb->len;
@@@ -2264,34 -2336,28 +2264,34 @@@
  	cb->seq = net->nft.base_seq;

  	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+ 		if (ctx->afi && ctx->afi != afi)
+ 			continue;
+ 
  		if (cur_family) {
  			if (afi->family != cur_family)
  				continue;

  			cur_family = 0;
  		}
- 
  		list_for_each_entry_rcu(table, &afi->tables, list) {
+ 			if (ctx->table && ctx->table != table)
+ 				continue;
+ 
  			if (cur_table) {
  				if (cur_table != table)
  					continue;

  				cur_table = NULL;
  			}
- 
- 			ctx->table = table;
- 			ctx->afi = afi;
  			idx = 0;
- 			list_for_each_entry_rcu(set, &ctx->table->sets, list) {
+ 			list_for_each_entry_rcu(set, &table->sets, list) {
  				if (idx < s_idx)
  					goto cont;
- 				if (nf_tables_fill_set(skb, ctx, set,
+ 
+ 				ctx_set = *ctx;
+ 				ctx_set.table = table;
+ 				ctx_set.afi = afi;
+ 				if (nf_tables_fill_set(skb, &ctx_set, set,
  						       NFT_MSG_NEWSET,
  						       NLM_F_MULTI) < 0) {
  					cb->args[0] = idx;
@@@ -2313,10 -2379,31 +2313,10 @@@ done
  	return skb->len;
  }

- static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+ static int nf_tables_dump_sets_done(struct netlink_callback *cb)
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- 	struct nlattr *nla[NFTA_SET_MAX + 1];
- 	struct nft_ctx ctx;
- 	int err, ret;
- 
- 	err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
- 			  nft_set_policy);
- 	if (err < 0)
- 		return err;
- 
- 	err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
- 	if (err < 0)
- 		return err;
- 
- 	if (ctx.table == NULL) {
- 		if (ctx.afi == NULL)
- 			ret = nf_tables_dump_sets_all(&ctx, skb, cb);
- 		else
- 			ret = nf_tables_dump_sets_family(&ctx, skb, cb);
- 	} else
- 		ret = nf_tables_dump_sets_table(&ctx, skb, cb);
- 
- 	return ret;
+ 	kfree(cb->data);
+ 	return 0;
  }

  #define NFT_SET_INACTIVE	(1 << 15)	/* Internal set flag */

@@@ -2339,17 -2426,7 +2339,17 @@@ static int nf_tables_getset(struct soc
  	if (nlh->nlmsg_flags & NLM_F_DUMP) {
  		struct netlink_dump_control c = {
  			.dump = nf_tables_dump_sets,
+ 			.done = nf_tables_dump_sets_done,
  		};
+ 		struct nft_ctx *ctx_dump;
+ 
+ 		ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_KERNEL);
+ 		if (ctx_dump == NULL)
+ 			return -ENOMEM;
+ 
+ 		*ctx_dump = ctx;
+ 		c.data = ctx_dump;
+ 
  		return netlink_dump_start(nlsk, skb, nlh, &c);
  	}
@@@ -3073,9 -3150,6 +3073,9 @@@ static int nf_tables_newsetelem(struct
  	struct nft_ctx ctx;
  	int rem, err = 0;

+ 	if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
+ 		return -EINVAL;
+ 
  	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
  	if (err < 0)
  		return err;
@@@ -3144,6 -3218,7 +3144,7 @@@ static int nft_del_setelem(struct nft_c
  	if (set->flags & NFT_SET_MAP)
  		nft_data_uninit(&elem.data, set->dtype);

+ 	return 0;
  err2:
  	nft_data_uninit(&elem.key, desc.type);
  err1:
@@@ -3159,9 -3234,6 +3160,9 @@@ static int nf_tables_delsetelem(struct
  	struct nft_ctx ctx;
  	int rem, err = 0;

+ 	if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
+ 		return -EINVAL;
+ 
  	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
  	if (err < 0)
  		return err;

diff --combined net/netfilter/xt_LED.c
index f14bcf2,92c71cd..3ba31c1
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@@ -50,11 -50,14 +50,14 @@@ struct xt_led_info_internal
  	struct timer_list timer;
  };

+ #define XT_LED_BLINK_DELAY 50 /* ms */
+ 
  static unsigned int
  led_tg(struct sk_buff *skb, const struct xt_action_param *par)
  {
  	const struct xt_led_info *ledinfo = par->targinfo;
  	struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+ 	unsigned long led_delay = XT_LED_BLINK_DELAY;

  	/*
  	 * If "always blink" is enabled, and there's still some time until the
@@@ -62,9 -65,10 +65,10 @@@
  	 */
  	if ((ledinfo->delay > 0) && ledinfo->always_blink &&
  	    timer_pending(&ledinternal->timer))
- 		led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
- 
- 	led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
+ 		led_trigger_blink_oneshot(&ledinternal->netfilter_led_trigger,
+ 					  &led_delay, &led_delay, 1);
+ 	else
+ 		led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);

  	/* If there's a positive delay, start/update the timer */
  	if (ledinfo->delay > 0) {
@@@ -133,7 -137,9 +137,7 @@@ static int led_tg_check(const struct xt
  	err = led_trigger_register(&ledinternal->netfilter_led_trigger);
  	if (err) {
- 		pr_warning("led_trigger_register() failed\n");
- 		if (err == -EEXIST)
- 			pr_warning("Trigger name is already in use.\n");
+ 		pr_err("Trigger name is already in use.\n");
  		goto exit_alloc;
  	}

diff --combined net/sctp/output.c
index 1eedba5,407ae2b..42dffd4
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@@ -178,7 -178,7 +178,7 @@@ sctp_xmit_t sctp_packet_transmit_chunk(
  	case SCTP_XMIT_RWND_FULL:
  	case SCTP_XMIT_OK:
- 	case SCTP_XMIT_NAGLE_DELAY:
+ 	case SCTP_XMIT_DELAY:
  		break;
  	}

@@@ -599,7 -599,7 +599,7 @@@ out
  	return err;
  no_route:
  	kfree_skb(nskb);
- 	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+ 	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
  	/* FIXME: Returning the 'err' will affect all the associations
  	 * associated with a socket, although only one of the paths of the
@@@ -633,6 -633,7 +633,6 @@@ nomem
  static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
  					       struct sctp_chunk *chunk)
  {
- 	sctp_xmit_t retval = SCTP_XMIT_OK;
  	size_t datasize, rwnd, inflight, flight_size;
  	struct sctp_transport *transport = packet->transport;
  	struct sctp_association *asoc = transport->asoc;
@@@ -657,11 -658,15 +657,11 @@@
datasize = sctp_data_size(chunk);
- 	if (datasize > rwnd) {
- 		if (inflight > 0) {
- 			/* We have (at least) one data chunk in flight,
- 			 * so we can't fall back to rule 6.1 B).
- 			 */
- 			retval = SCTP_XMIT_RWND_FULL;
- 			goto finish;
- 		}
- 	}
+ 	if (datasize > rwnd && inflight > 0)
+ 		/* We have (at least) one data chunk in flight,
+ 		 * so we can't fall back to rule 6.1 B).
+ 		 */
+ 		return SCTP_XMIT_RWND_FULL;

  	/* RFC 2960 6.1  Transmission of DATA Chunks
  	 *
@@@ -675,44 -680,36 +675,44 @@@
  	 * When a Fast Retransmit is being performed the sender SHOULD
  	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
  	 */
- 	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
- 		if (flight_size >= transport->cwnd) {
- 			retval = SCTP_XMIT_RWND_FULL;
- 			goto finish;
- 		}
+ 	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
+ 	    flight_size >= transport->cwnd)
+ 		return SCTP_XMIT_RWND_FULL;
  	/* Nagle's algorithm to solve small-packet problem:
  	 * Inhibit the sending of new chunks when new outgoing data arrives
  	 * if any previously transmitted data on the connection remains
  	 * unacknowledged.
  	 */
- 	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
- 	    inflight && sctp_state(asoc, ESTABLISHED)) {
- 		unsigned int max = transport->pathmtu - packet->overhead;
- 		unsigned int len = chunk->skb->len + q->out_qlen;
- 
- 		/* Check whether this chunk and all the rest of pending
- 		 * data will fit or delay in hopes of bundling a full
- 		 * sized packet.
- 		 * Don't delay large message writes that may have been
- 		 * fragmented into small pieces.
- 		 */
- 		if ((len < max) && chunk->msg->can_delay) {
- 			retval = SCTP_XMIT_NAGLE_DELAY;
- 			goto finish;
- 		}
- 	}
- finish:
- 	return retval;
+ 	if (sctp_sk(asoc->base.sk)->nodelay)
+ 		/* Nagle disabled */
+ 		return SCTP_XMIT_OK;
+ 
+ 	if (!sctp_packet_empty(packet))
+ 		/* Append to packet */
+ 		return SCTP_XMIT_OK;
+ 
+ 	if (inflight == 0)
+ 		/* Nothing unacked */
+ 		return SCTP_XMIT_OK;
+ 
+ 	if (!sctp_state(asoc, ESTABLISHED))
+ 		return SCTP_XMIT_OK;
+ 
+ 	/* Check whether this chunk and all the rest of pending data will fit
+ 	 * or delay in hopes of bundling a full sized packet.
+ 	 */
+ 	if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+ 		/* Enough data queued to fill a packet */
+ 		return SCTP_XMIT_OK;
+ 
+ 	/* Don't delay large message writes that may have been fragmented */
+ 	if (!chunk->msg->can_delay)
+ 		return SCTP_XMIT_OK;
+ 
+ 	/* Defer until all data acked or packet full */
+ 	return SCTP_XMIT_DELAY;
  }
/* This private function does management things when adding DATA chunk */
linux-merge@lists.open-mesh.org