The following commit has been merged in the master branch:

commit 6a06e5e1bb217be077e1f8ee2745b4c5b1aa02db
Merge: d9f72f359e00a45a6cd7cc2d5121b04b9dc927e1 6672d90fe779dc0dfffe027c3ede12609df091c2
Author: David S. Miller <davem@davemloft.net>
Date:   Fri Sep 28 14:40:49 2012 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/team/team.c
	drivers/net/usb/qmi_wwan.c
	net/batman-adv/bat_iv_ogm.c
	net/ipv4/fib_frontend.c
	net/ipv4/route.c
	net/l2tp/l2tp_netlink.c
The team, fib_frontend, route, and l2tp_netlink conflicts were simply overlapping changes.
qmi_wwan and bat_iv_ogm were of the "use HEAD" variety.
With help from Antonio Quartulli.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined drivers/infiniband/ulp/ipoib/ipoib.h index 381f51b,0af216d..ac48f86 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@@ -104,10 -104,6 +104,10 @@@ enum
MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, + + IPOIB_NON_CHILD = 0, + IPOIB_LEGACY_CHILD = 1, + IPOIB_RTNL_CHILD = 2, };
#define IPOIB_OP_RECV (1ul << 31) @@@ -266,7 -262,10 +266,10 @@@ struct ipoib_ethtool_st u16 max_coalesced_frames; };
+ struct ipoib_neigh_table; + struct ipoib_neigh_hash { + struct ipoib_neigh_table *ntbl; struct ipoib_neigh __rcu **buckets; struct rcu_head rcu; u32 mask; @@@ -275,9 -274,9 +278,9 @@@
struct ipoib_neigh_table { struct ipoib_neigh_hash __rcu *htbl; - rwlock_t rwlock; atomic_t entries; struct completion flushed; + struct completion deleted; };
/* @@@ -354,7 -353,6 +357,7 @@@ struct ipoib_dev_priv struct net_device *parent; struct list_head child_intfs; struct list_head list; + int child_type;
#ifdef CONFIG_INFINIBAND_IPOIB_CM struct ipoib_cm_dev_priv cm; @@@ -514,14 -512,6 +517,14 @@@ void ipoib_event(struct ib_event_handle int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, + u16 pkey, int child_type); + +int __init ipoib_netlink_init(void); +void __exit ipoib_netlink_fini(void); + +void ipoib_setup(struct net_device *dev); + void ipoib_pkey_poll(struct work_struct *work); int ipoib_pkey_dev_delay_open(struct net_device *dev); void ipoib_drain_cq(struct net_device *dev); diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c index b3e9709,1e19b5a..128fab1 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@@ -173,11 -173,6 +173,11 @@@ static int ipoib_stop(struct net_devic return 0; }
+static void ipoib_uninit(struct net_device *dev) +{ + ipoib_dev_cleanup(dev); +} + static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@@ -551,15 -546,15 +551,15 @@@ static void neigh_add_path(struct sk_bu struct ipoib_neigh *neigh; unsigned long flags;
+ spin_lock_irqsave(&priv->lock, flags); neigh = ipoib_neigh_alloc(daddr, dev); if (!neigh) { + spin_unlock_irqrestore(&priv->lock, flags); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); return; }
- spin_lock_irqsave(&priv->lock, flags); - path = __path_find(dev, daddr + 4); if (!path) { path = path_rec_create(dev, daddr + 4); @@@ -868,10 -863,10 +868,10 @@@ static void __ipoib_reap_neigh(struct i if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return;
- write_lock_bh(&ntbl->rwlock); + spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock));
if (!htbl) goto out_unlock; @@@ -888,16 -883,14 +888,14 @@@ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from path/mc list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } else { np = &neigh->hnext; @@@ -907,7 -900,7 +905,7 @@@ }
out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); }
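The resolution above retires the neighbour table's private rwlock in favour of the existing priv->lock spinlock (irqsave flavour), so the list_del() no longer needs nested locking and every update-side rcu_dereference_protected() now names priv->lock in its lockdep_is_held() check. A minimal sketch of that RCU-list-under-spinlock pattern — my_node, my_head and my_lock are illustrative names, not from the IPoIB code:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_node {
		struct my_node __rcu *next;
		struct rcu_head rcu;
	};

	static DEFINE_SPINLOCK(my_lock);
	static struct my_node __rcu *my_head;

	static void my_node_reclaim(struct rcu_head *head)
	{
		kfree(container_of(head, struct my_node, rcu));
	}

	static void my_del_all(void)
	{
		struct my_node *n;
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		while ((n = rcu_dereference_protected(my_head,
				lockdep_is_held(&my_lock))) != NULL) {
			/* unlink under the writer lock ... */
			rcu_assign_pointer(my_head,
				rcu_dereference_protected(n->next,
						lockdep_is_held(&my_lock)));
			/* ... and free only after a grace period */
			call_rcu(&n->rcu, my_node_reclaim);
		}
		spin_unlock_irqrestore(&my_lock, flags);
	}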
static void ipoib_reap_neigh(struct work_struct *work) @@@ -952,10 -945,8 +950,8 @@@ struct ipoib_neigh *ipoib_neigh_alloc(u struct ipoib_neigh *neigh; u32 hash_val;
- write_lock_bh(&ntbl->rwlock); - htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) { neigh = NULL; goto out_unlock; @@@ -966,10 -957,10 +962,10 @@@ */ hash_val = ipoib_addr_hash(htbl, daddr); for (neigh = rcu_dereference_protected(htbl->buckets[hash_val], - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); neigh != NULL; neigh = rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))) { + lockdep_is_held(&priv->lock))) { if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { /* found, take one ref on behalf of the caller */ if (!atomic_inc_not_zero(&neigh->refcnt)) { @@@ -992,12 -983,11 +988,11 @@@ /* put in hash */ rcu_assign_pointer(neigh->hnext, rcu_dereference_protected(htbl->buckets[hash_val], - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); rcu_assign_pointer(htbl->buckets[hash_val], neigh); atomic_inc(&ntbl->entries);
out_unlock: - write_unlock_bh(&ntbl->rwlock);
return neigh; } @@@ -1045,35 -1035,29 +1040,29 @@@ void ipoib_neigh_free(struct ipoib_neig struct ipoib_neigh *n; u32 hash_val;
- write_lock_bh(&ntbl->rwlock); - htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) - goto out_unlock; + return;
hash_val = ipoib_addr_hash(htbl, neigh->daddr); np = &htbl->buckets[hash_val]; for (n = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); n != NULL; n = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) { + lockdep_is_held(&priv->lock))) { if (n == neigh) { /* found */ rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); - goto out_unlock; + return; } else { np = &n->hnext; } } - - out_unlock: - write_unlock_bh(&ntbl->rwlock); - }
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) @@@ -1085,7 -1069,6 +1074,6 @@@
clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); ntbl->htbl = NULL; - rwlock_init(&ntbl->rwlock); htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); if (!htbl) return -ENOMEM; @@@ -1100,6 -1083,7 +1088,7 @@@ htbl->mask = (size - 1); htbl->buckets = buckets; ntbl->htbl = htbl; + htbl->ntbl = ntbl; atomic_set(&ntbl->entries, 0);
/* start garbage collection */ @@@ -1116,9 -1100,11 +1105,11 @@@ static void neigh_hash_free_rcu(struct struct ipoib_neigh_hash, rcu); struct ipoib_neigh __rcu **buckets = htbl->buckets; + struct ipoib_neigh_table *ntbl = htbl->ntbl;
kfree(buckets); kfree(htbl); + complete(&ntbl->deleted); }
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) @@@ -1130,10 -1116,10 +1121,10 @@@ int i;
/* remove all neigh connected to a given path or mcast */ - write_lock_bh(&ntbl->rwlock); + spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock));
if (!htbl) goto out_unlock; @@@ -1143,16 -1129,14 +1134,14 @@@ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { /* delete neighs belong to this parent */ if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from parent list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } else { np = &neigh->hnext; @@@ -1161,7 -1145,7 +1150,7 @@@ } } out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); }
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) @@@ -1169,37 -1153,44 +1158,44 @@@ struct ipoib_neigh_table *ntbl = &priv->ntbl; struct ipoib_neigh_hash *htbl; unsigned long flags; - int i; + int i, wait_flushed = 0;
- write_lock_bh(&ntbl->rwlock); + init_completion(&priv->ntbl.flushed); + + spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) goto out_unlock;
+ wait_flushed = atomic_read(&priv->ntbl.entries); + if (!wait_flushed) + goto free_htbl; + for (i = 0; i < htbl->size; i++) { struct ipoib_neigh *neigh; struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from path/mc list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } }
+ free_htbl: rcu_assign_pointer(ntbl->htbl, NULL); call_rcu(&htbl->rcu, neigh_hash_free_rcu);
out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); + if (wait_flushed) + wait_for_completion(&priv->ntbl.flushed); }
static void ipoib_neigh_hash_uninit(struct net_device *dev) @@@ -1208,7 -1199,7 +1204,7 @@@ int stopped;
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); - init_completion(&priv->ntbl.flushed); + init_completion(&priv->ntbl.deleted); set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
/* Stop GC if called at init fail need to cancel work */ @@@ -1216,10 -1207,9 +1212,9 @@@ if (!stopped) cancel_delayed_work(&priv->neigh_reap_task);
- if (atomic_read(&priv->ntbl.entries)) { - ipoib_flush_neighs(priv); - wait_for_completion(&priv->ntbl.flushed); - } + ipoib_flush_neighs(priv); + + wait_for_completion(&priv->ntbl.deleted); }
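Two completions now fence the teardown: ntbl.flushed is waited on inside ipoib_flush_neighs() when the table still had entries, while ntbl.deleted is signalled from the RCU callback that frees the hash array itself, so ipoib_neigh_hash_uninit() sleeps until the deferred free has actually run. A condensed sketch of that handshake (the my_* names are illustrative):

	#include <linux/completion.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_table;

	struct my_hash {
		struct my_table *tbl;	/* back-pointer for the RCU callback */
		struct rcu_head rcu;
	};

	struct my_table {
		struct my_hash __rcu *htbl;
		struct completion deleted;
	};

	static void my_hash_free_rcu(struct rcu_head *head)
	{
		struct my_hash *h = container_of(head, struct my_hash, rcu);
		struct my_table *t = h->tbl;

		kfree(h);
		complete(&t->deleted);	/* wakes my_table_uninit() */
	}

	static void my_table_uninit(struct my_table *t)
	{
		struct my_hash *h = rcu_dereference_protected(t->htbl, 1);

		init_completion(&t->deleted);
		RCU_INIT_POINTER(t->htbl, NULL);
		call_rcu(&h->rcu, my_hash_free_rcu);
		wait_for_completion(&t->deleted);	/* blocks until the free ran */
	}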
@@@ -1267,9 -1257,6 +1262,9 @@@ out void ipoib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; + LIST_HEAD(head); + + ASSERT_RTNL();
ipoib_delete_debug_files(dev);
@@@ -1278,9 -1265,10 +1273,9 @@@ /* Stop GC on child */ set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); cancel_delayed_work(&cpriv->neigh_reap_task); - unregister_netdev(cpriv->dev); - ipoib_dev_cleanup(cpriv->dev); - free_netdev(cpriv->dev); + unregister_netdevice_queue(cpriv->dev, &head); } + unregister_netdevice_many(&head);
ipoib_ib_dev_cleanup(dev);
@@@ -1298,7 -1286,6 +1293,7 @@@ static const struct header_ops ipoib_he };
static const struct net_device_ops ipoib_netdev_ops = { + .ndo_uninit = ipoib_uninit, .ndo_open = ipoib_open, .ndo_stop = ipoib_stop, .ndo_change_mtu = ipoib_change_mtu, @@@ -1308,7 -1295,7 +1303,7 @@@ .ndo_set_rx_mode = ipoib_set_mcast_list, };
-static void ipoib_setup(struct net_device *dev) +void ipoib_setup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev);
@@@ -1670,6 -1657,7 +1665,6 @@@ static void ipoib_remove_one(struct ib_ flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev); - ipoib_dev_cleanup(priv->dev); free_netdev(priv->dev); }
@@@ -1721,15 -1709,8 +1716,15 @@@ static int __init ipoib_init_module(voi if (ret) goto err_sa;
+ ret = ipoib_netlink_init(); + if (ret) + goto err_client; + return 0;
+err_client: + ib_unregister_client(&ipoib_client); + err_sa: ib_sa_unregister_client(&ipoib_sa_client); destroy_workqueue(ipoib_workqueue); @@@ -1742,7 -1723,6 +1737,7 @@@ err_fs
static void __exit ipoib_cleanup_module(void) { + ipoib_netlink_fini(); ib_unregister_client(&ipoib_client); ib_sa_unregister_client(&ipoib_sa_client); ipoib_unregister_debugfs(); diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ca80487,e8e97a7..f67e700 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -662,14 -662,16 +662,16 @@@ void bnx2x_csum_validate(struct sk_buf struct bnx2x_fastpath *fp, struct bnx2x_eth_q_stats *qstats) { - /* Do nothing if no IP/L4 csum validation was done */ - + /* Do nothing if no L4 csum validation was done. + * We do not check whether IP csum was validated. For IPv4 we assume + * that if the card got as far as validating the L4 csum, it also + * validated the IP csum. IPv6 has no IP csum. + */ if (cqe->fast_path_cqe.status_flags & - (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) return;
- /* If both IP/L4 validation were done, check if an error was found. */ + /* If L4 validation was done, check if an error was found. */
if (cqe->fast_path_cqe.type_error_flags & (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | @@@ -2283,7 -2285,7 +2285,7 @@@ int bnx2x_nic_load(struct bnx2x *bp, in /* Wait for all pending SP commands to complete */ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { BNX2X_ERR("Timeout waiting for SP elements to complete\n"); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); return -EBUSY; }
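The reworded comment makes the checksum policy explicit: only L4 validation is checked, on the assumption that hardware which got as far as the L4 checksum already validated the IPv4 header, and IPv6 carries no header checksum at all. The shape of such a validate helper, sketched with placeholder flag bits (not the real bnx2x definitions):

	#include <linux/bitops.h>
	#include <linux/skbuff.h>

	#define MY_L4_XSUM_NO_VALIDATION	BIT(0)	/* HW skipped the L4 check */
	#define MY_IP_BAD_XSUM			BIT(1)
	#define MY_L4_BAD_XSUM			BIT(2)

	static void my_csum_validate(struct sk_buff *skb, u32 status, u32 errors)
	{
		skb->ip_summed = CHECKSUM_NONE;	/* default: let the stack verify */

		if (status & MY_L4_XSUM_NO_VALIDATION)
			return;

		if (errors & (MY_IP_BAD_XSUM | MY_L4_BAD_XSUM))
			return;			/* bad csum: leave it to the stack */

		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* HW already verified it */
	}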
@@@ -2331,7 -2333,7 +2333,7 @@@ load_error0 }
/* must be called with rtnl_lock */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) { int i; bool global = false; @@@ -2393,7 -2395,7 +2395,7 @@@
/* Cleanup the chip if needed */ if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); + bnx2x_chip_cleanup(bp, unload_mode, keep_link); else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); @@@ -2417,7 -2419,7 +2419,7 @@@ bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, false); }
/* @@@ -3768,7 -3770,7 +3770,7 @@@ int bnx2x_reload_if_running(struct net_ if (unlikely(!netif_running(dev))) return 0;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); return bnx2x_nic_load(bp, LOAD_NORMAL); }
@@@ -3965,7 -3967,7 +3967,7 @@@ int bnx2x_suspend(struct pci_dev *pdev
netif_device_detach(dev);
- bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7a91570,0875ecf..c319e4c --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -2171,6 -2171,7 +2171,6 @@@ void bnx2x_link_set(struct bnx2x *bp { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp);
@@@ -2183,19 -2184,12 +2183,19 @@@ static void bnx2x__link_reset(struct bn { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not reset link\n"); }
+void bnx2x_force_link_reset(struct bnx2x *bp) +{ + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); +} + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) { u8 rc = 0; @@@ -6763,6 -6757,7 +6763,6 @@@ static int bnx2x_init_hw_port(struct bn u32 low, high; u32 val;
- bnx2x__link_reset(bp);
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@@ -8255,15 -8250,12 +8255,15 @@@ u32 bnx2x_send_unload_req(struct bnx2x * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp) +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) { + u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + /* Report UNLOAD_DONE to MCP */ if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); }
static int bnx2x_func_wait_started(struct bnx2x *bp) @@@ -8332,7 -8324,7 +8332,7 @@@ return 0; }
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); int i, rc = 0; @@@ -8454,7 -8446,7 +8454,7 @@@ unload_error
/* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, keep_link); }
void bnx2x_disable_close_the_gate(struct bnx2x *bp) @@@ -8866,8 -8858,7 +8866,8 @@@ int bnx2x_leader_reset(struct bnx2x *bp * driver is owner of the HW */ if (!global && !BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EAGAIN; @@@ -8973,7 -8964,7 +8973,7 @@@ static void bnx2x_parity_recover(struc
/* Stop the driver */ /* If interface has been removed - break */ - if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) return;
bp->recovery_state = BNX2X_RECOVERY_WAIT; @@@ -9139,7 -9130,7 +9139,7 @@@ static void bnx2x_sp_rtnl_task(struct w bp->sp_rtnl_state = 0; smp_mb();
- bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL);
goto sp_rtnl_exit; @@@ -9325,8 -9316,7 +9325,8 @@@ static void __devinit bnx2x_prev_unload
static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) { - u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BNX2X_ERR("MCP response failure, aborting\n"); return -EBUSY; @@@ -9841,12 -9831,13 +9841,13 @@@ static void __devinit bnx2x_get_igu_cam }
#ifdef CONFIG_PCI_MSI - /* - * It's expected that number of CAM entries for this functions is equal - * to the number evaluated based on the MSI-X table size. We want a - * harsh warning if these values are different! + /* Due to new PF resource allocation by MFW T7.4 and above, it's + * optional that number of CAM entries will not be equal to the value + * advertised in PCI. + * Driver should use the minimal value of both as the actual status + * block count */ - WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); + bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); #endif
if (igu_sb_cnt == 0) @@@ -11019,7 -11010,7 +11020,7 @@@ static int bnx2x_close(struct net_devic struct bnx2x *bp = netdev_priv(dev);
/* Unload the driver, release IRQs */ - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
/* Power off */ bnx2x_set_power_state(bp, PCI_D3hot); diff --combined drivers/net/ethernet/freescale/gianfar_ptp.c index 18762a3,0daa66b..b9db0e0 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@@ -510,12 -510,12 +510,12 @@@ static int gianfar_ptp_probe(struct pla
spin_unlock_irqrestore(&etsects->lock, flags);
- etsects->clock = ptp_clock_register(&etsects->caps); + etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); if (IS_ERR(etsects->clock)) { err = PTR_ERR(etsects->clock); goto no_clock; } - gfar_phc_clock = ptp_clock_index(etsects->clock); + gfar_phc_index = ptp_clock_index(etsects->clock);
dev_set_drvdata(&dev->dev, etsects);
@@@ -539,7 -539,7 +539,7 @@@ static int gianfar_ptp_remove(struct pl gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0);
- gfar_phc_clock = -1; + gfar_phc_index = -1; ptp_clock_unregister(etsects->clock); iounmap(etsects->regs); release_resource(etsects->rsrc); diff --combined drivers/net/ethernet/intel/e1000/e1000_main.c index 3a8368e,bde337e..f9ac229 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@@ -2014,7 -2014,6 +2014,7 @@@ static void e1000_clean_tx_ring(struct e1000_unmap_and_free_tx_resource(adapter, buffer_info); }
+ netdev_reset_queue(adapter->netdev); size = sizeof(struct e1000_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size);
@@@ -3150,6 -3149,17 +3150,17 @@@ static netdev_tx_t e1000_xmit_frame(str return NETDEV_TX_OK; }
+ /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (skb->len < ETH_ZLEN) { + if (skb_pad(skb, ETH_ZLEN - skb->len)) + return NETDEV_TX_OK; + skb->len = ETH_ZLEN; + skb_set_tail_pointer(skb, ETH_ZLEN); + } + mss = skb_shinfo(skb)->gso_size; /* The controller does a simple calculation to * make sure there is enough room in the FIFO before @@@ -3263,7 -3273,6 +3274,7 @@@ nr_frags, mss);
if (count) { + netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb);
e1000_tx_queue(adapter, tx_ring, tx_flags, count); @@@ -3851,7 -3860,6 +3862,7 @@@ static bool e1000_clean_tx_irq(struct e unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; @@@ -3869,11 -3877,6 +3880,11 @@@ if (cleaned) { total_tx_packets += buffer_info->segs; total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + } e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; @@@ -3887,8 -3890,6 +3898,8 @@@
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl); + #define TX_WAKE_THRESHOLD 32 if (unlikely(count && netif_carrier_ok(netdev) && E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { @@@ -4949,10 -4950,6 +4960,10 @@@ int e1000_set_spd_dplx(struct e1000_ada default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + return 0;
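The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls threaded through e1000 above are the standard byte queue limits (BQL) triple: bytes are accounted when a frame is posted to the ring, reported back from the TX-completion path, and zeroed whenever the ring is flushed so the accounting cannot drift. In outline (the my_* names are illustrative, not e1000's):

	#include <linux/netdevice.h>

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ...post skb to the hardware ring... */
		netdev_sent_queue(dev, skb->len);	/* BQL: bytes now in flight */
		return NETDEV_TX_OK;
	}

	static void my_clean_tx_irq(struct net_device *dev)
	{
		unsigned int pkts_compl = 0, bytes_compl = 0;

		/* ...walk completed descriptors, accumulating pkts/bytes... */
		netdev_completed_queue(dev, pkts_compl, bytes_compl);
	}

	static void my_clean_tx_ring(struct net_device *dev)
	{
		/* ...free all pending buffers... */
		netdev_reset_queue(dev);	/* keep BQL consistent across resets */
	}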
err_inval: diff --combined drivers/net/team/team.c index 9ce0c51,f8cd61f..5c7547c --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -54,29 -54,29 +54,29 @@@ static struct team_port *team_port_get_ }
/* - * Since the ability to change mac address for open port device is tested in + * Since the ability to change device address for open port device is tested in * team_port_add, this function can be called without control of return value */ -static int __set_port_mac(struct net_device *port_dev, - const unsigned char *dev_addr) +static int __set_port_dev_addr(struct net_device *port_dev, + const unsigned char *dev_addr) { struct sockaddr addr;
- memcpy(addr.sa_data, dev_addr, ETH_ALEN); - addr.sa_family = ARPHRD_ETHER; + memcpy(addr.sa_data, dev_addr, port_dev->addr_len); + addr.sa_family = port_dev->type; return dev_set_mac_address(port_dev, &addr); }
-static int team_port_set_orig_mac(struct team_port *port) +static int team_port_set_orig_dev_addr(struct team_port *port) { - return __set_port_mac(port->dev, port->orig.dev_addr); + return __set_port_dev_addr(port->dev, port->orig.dev_addr); }
-int team_port_set_team_mac(struct team_port *port) +int team_port_set_team_dev_addr(struct team_port *port) { - return __set_port_mac(port->dev, port->team->dev->dev_addr); + return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); } -EXPORT_SYMBOL(team_port_set_team_mac); +EXPORT_SYMBOL(team_port_set_team_dev_addr);
static void team_refresh_port_linkup(struct team_port *port) { @@@ -658,122 -658,6 +658,122 @@@ static rx_handler_result_t team_handle_ }
+/************************************* + * Multiqueue Tx port select override + *************************************/ + +static int team_queue_override_init(struct team *team) +{ + struct list_head *listarr; + unsigned int queue_cnt = team->dev->num_tx_queues - 1; + unsigned int i; + + if (!queue_cnt) + return 0; + listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL); + if (!listarr) + return -ENOMEM; + team->qom_lists = listarr; + for (i = 0; i < queue_cnt; i++) + INIT_LIST_HEAD(listarr++); + return 0; +} + +static void team_queue_override_fini(struct team *team) +{ + kfree(team->qom_lists); +} + +static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id) +{ + return &team->qom_lists[queue_id - 1]; +} + +/* + * note: already called with rcu_read_lock + */ +static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb) +{ + struct list_head *qom_list; + struct team_port *port; + + if (!team->queue_override_enabled || !skb->queue_mapping) + return false; + qom_list = __team_get_qom_list(team, skb->queue_mapping); + list_for_each_entry_rcu(port, qom_list, qom_list) { + if (!team_dev_queue_xmit(team, port, skb)) + return true; + } + return false; +} + +static void __team_queue_override_port_del(struct team *team, + struct team_port *port) +{ + list_del_rcu(&port->qom_list); + synchronize_rcu(); + INIT_LIST_HEAD(&port->qom_list); +} + +static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, + struct team_port *cur) +{ + if (port->priority < cur->priority) + return true; + if (port->priority > cur->priority) + return false; + if (port->index < cur->index) + return true; + return false; +} + +static void __team_queue_override_port_add(struct team *team, + struct team_port *port) +{ + struct team_port *cur; + struct list_head *qom_list; + struct list_head *node; + + if (!port->queue_id || !team_port_enabled(port)) + return; + + qom_list = __team_get_qom_list(team, port->queue_id); + node = qom_list; + list_for_each_entry(cur, qom_list, qom_list) { + if (team_queue_override_port_has_gt_prio_than(port, cur)) + break; + node = &cur->qom_list; + } + list_add_tail_rcu(&port->qom_list, node); +} + +static void __team_queue_override_enabled_check(struct team *team) +{ + struct team_port *port; + bool enabled = false; + + list_for_each_entry(port, &team->port_list, list) { + if (!list_empty(&port->qom_list)) { + enabled = true; + break; + } + } + if (enabled == team->queue_override_enabled) + return; + netdev_dbg(team->dev, "%s queue override\n", + enabled ? "Enabling" : "Disabling"); + team->queue_override_enabled = enabled; +} + +static void team_queue_override_port_refresh(struct team *team, + struct team_port *port) +{ + __team_queue_override_port_del(team, port); + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); +} + + /**************** * Port handling ****************/ @@@ -804,7 -688,6 +804,7 @@@ static void team_port_enable(struct tea hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); + team_queue_override_port_refresh(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); } @@@ -833,7 -716,6 +833,7 @@@ static void team_port_disable(struct te hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; + team_queue_override_port_refresh(team, port); __team_adjust_ops(team, team->en_port_count - 1); /* * Wait until readers see adjusted ops. 
This ensures that @@@ -967,9 -849,6 +967,8 @@@ static struct netpoll_info *team_netpol #endif
static void __team_port_change_port_added(struct team_port *port, bool linkup); - +static int team_dev_type_check_change(struct net_device *dev, + struct net_device *port_dev);
static int team_port_add(struct team *team, struct net_device *port_dev) { @@@ -978,8 -857,9 +977,8 @@@ char *portname = port_dev->name; int err;
- if (port_dev->flags & IFF_LOOPBACK || - port_dev->type != ARPHRD_ETHER) { - netdev_err(dev, "Device %s is of an unsupported type\n", + if (port_dev->flags & IFF_LOOPBACK) { + netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n", portname); return -EINVAL; } @@@ -990,17 -870,6 +989,17 @@@ return -EBUSY; }
+ if (port_dev->features & NETIF_F_VLAN_CHALLENGED && + vlan_uses_dev(dev)) { + netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n", + portname); + return -EPERM; + } + + err = team_dev_type_check_change(dev, port_dev); + if (err) + return err; + if (port_dev->flags & IFF_UP) { netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", portname); @@@ -1014,7 -883,6 +1013,7 @@@
port->dev = port_dev; port->team = team; + INIT_LIST_HEAD(&port->qom_list);
port->orig.mtu = port_dev->mtu; err = dev_set_mtu(port_dev, dev->mtu); @@@ -1023,7 -891,7 +1022,7 @@@ goto err_set_mtu; }
- memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); + memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
err = team_port_enter(team, port); if (err) { @@@ -1104,7 -972,7 +1103,7 @@@ err_vids_add
err_dev_open: team_port_leave(team, port); - team_port_set_orig_mac(port); + team_port_set_orig_dev_addr(port);
err_port_enter: dev_set_mtu(port_dev, port->orig.mtu); @@@ -1142,7 -1010,7 +1141,7 @@@ static int team_port_del(struct team *t vlan_vids_del_by_dev(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); - team_port_set_orig_mac(port); + team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); synchronize_rcu(); kfree(port); @@@ -1227,49 -1095,6 +1226,49 @@@ static int team_user_linkup_en_option_s return 0; }
+static int team_priority_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.s32_val = port->priority; + return 0; +} + +static int team_priority_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + port->priority = ctx->data.s32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + +static int team_queue_id_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.u32_val = port->queue_id; + return 0; +} + +static int team_queue_id_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + if (port->queue_id == ctx->data.u32_val) + return 0; + if (ctx->data.u32_val >= team->dev->real_num_tx_queues) + return -EINVAL; + port->queue_id = ctx->data.u32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + + static const struct team_option team_options[] = { { .name = "mode", @@@ -1298,20 -1123,6 +1297,20 @@@ .getter = team_user_linkup_en_option_get, .setter = team_user_linkup_en_option_set, }, + { + .name = "priority", + .type = TEAM_OPTION_TYPE_S32, + .per_port = true, + .getter = team_priority_option_get, + .setter = team_priority_option_set, + }, + { + .name = "queue_id", + .type = TEAM_OPTION_TYPE_U32, + .per_port = true, + .getter = team_queue_id_option_get, + .setter = team_queue_id_option_set, + }, };
static struct lock_class_key team_netdev_xmit_lock_key; @@@ -1347,9 -1158,6 +1346,9 @@@ static int team_init(struct net_device for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) INIT_HLIST_HEAD(&team->en_port_hlist[i]); INIT_LIST_HEAD(&team->port_list); + err = team_queue_override_init(team); + if (err) + goto err_team_queue_override_init;
team_adjust_ops(team);
@@@ -1365,8 -1173,6 +1364,8 @@@ return 0;
err_options_register: + team_queue_override_fini(team); +err_team_queue_override_init: free_percpu(team->pcpu_stats);
return err; @@@ -1384,7 -1190,6 +1383,7 @@@ static void team_uninit(struct net_devi
__team_change_mode(team, NULL); /* cleanup */ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + team_queue_override_fini(team); mutex_unlock(&team->lock); }
@@@ -1414,12 -1219,10 +1413,12 @@@ static int team_close(struct net_devic static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) { struct team *team = netdev_priv(dev); - bool tx_success = false; + bool tx_success; unsigned int len = skb->len;
- tx_success = team->ops.transmit(team, skb); + tx_success = team_queue_override_transmit(team, skb); + if (!tx_success) + tx_success = team->ops.transmit(team, skb); if (tx_success) { struct team_pcpu_stats *pcpu_stats;
@@@ -1493,18 -1296,17 +1492,18 @@@ static void team_set_rx_mode(struct net
static int team_set_mac_address(struct net_device *dev, void *p) { + struct sockaddr *addr = p; struct team *team = netdev_priv(dev); struct team_port *port; - int err;
- err = eth_mac_addr(dev, p); - if (err) - return err; + if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) - if (team->ops.port_change_mac) - team->ops.port_change_mac(team, port); + if (team->ops.port_change_dev_addr) + team->ops.port_change_dev_addr(team, port); rcu_read_unlock(); return 0; } @@@ -1735,45 -1537,6 +1734,45 @@@ static const struct net_device_ops team * rt netlink interface ***********************/
+static void team_setup_by_port(struct net_device *dev, + struct net_device *port_dev) +{ + dev->header_ops = port_dev->header_ops; + dev->type = port_dev->type; + dev->hard_header_len = port_dev->hard_header_len; + dev->addr_len = port_dev->addr_len; + dev->mtu = port_dev->mtu; + memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); + memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; +} + +static int team_dev_type_check_change(struct net_device *dev, + struct net_device *port_dev) +{ + struct team *team = netdev_priv(dev); + char *portname = port_dev->name; + int err; + + if (dev->type == port_dev->type) + return 0; + if (!list_empty(&team->port_list)) { + netdev_err(dev, "Device %s is of different type\n", portname); + return -EBUSY; + } + err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev); + err = notifier_to_errno(err); + if (err) { + netdev_err(dev, "Refused to change device type\n"); + return err; + } + dev_uc_flush(dev); + dev_mc_flush(dev); + team_setup_by_port(dev, port_dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + return 0; +} + static void team_setup(struct net_device *dev) { ether_setup(dev); @@@ -1888,16 -1651,16 +1887,16 @@@ static int team_nl_cmd_noop(struct sk_b if (!msg) return -ENOMEM;
- hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &team_nl_family, 0, TEAM_CMD_NOOP); - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); + if (!hdr) { + err = -EMSGSIZE; goto err_msg_put; }
genlmsg_end(msg, hdr);
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); + return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
err_msg_put: nlmsg_free(msg); @@@ -1954,7 -1717,7 +1953,7 @@@ static int team_nl_send_generic(struct if (err < 0) goto err_fill;
- err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); + err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid); return err;
err_fill: @@@ -1963,11 -1726,11 +1962,11 @@@ }
typedef int team_nl_send_func_t(struct sk_buff *skb, - struct team *team, u32 pid); + struct team *team, u32 portid);
-static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid) +static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid) { - return genlmsg_unicast(dev_net(team->dev), skb, pid); + return genlmsg_unicast(dev_net(team->dev), skb, portid); }
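Besides the pid -> portid rename, the hunks above fix the error check on genlmsg_put(), which returns NULL when the message header does not fit — never an ERR_PTR — so the old IS_ERR()/PTR_ERR() test could never catch a full skb. The corrected shape, sketched with a placeholder family and command:

	#include <net/genetlink.h>

	static struct genl_family my_family;	/* placeholder, assumed registered */

	#define MY_CMD_NOOP 0

	static int my_fill_noop(struct sk_buff *skb, u32 portid, u32 seq, int flags)
	{
		void *hdr;

		hdr = genlmsg_put(skb, portid, seq, &my_family, flags, MY_CMD_NOOP);
		if (!hdr)			/* NULL on failure, not ERR_PTR */
			return -EMSGSIZE;

		/* ...nla_put() attributes would go here... */

		genlmsg_end(skb, hdr);
		return 0;
	}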
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, @@@ -2027,12 -1790,6 +2026,12 @@@ nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) goto nest_cancel; break; + case TEAM_OPTION_TYPE_S32: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32)) + goto nest_cancel; + if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val)) + goto nest_cancel; + break; default: BUG(); } @@@ -2052,13 -1809,13 +2051,13 @@@ nest_cancel }
static int __send_and_alloc_skb(struct sk_buff **pskb, - struct team *team, u32 pid, + struct team *team, u32 portid, team_nl_send_func_t *send_func) { int err;
if (*pskb) { - err = send_func(*pskb, team, pid); + err = send_func(*pskb, team, portid); if (err) return err; } @@@ -2068,7 -1825,7 +2067,7 @@@ return 0; }
-static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, +static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, int flags, team_nl_send_func_t *send_func, struct list_head *sel_opt_inst_list) { @@@ -2085,14 -1842,14 +2084,14 @@@ struct team_option_inst, tmp_list);
start_again: - err = __send_and_alloc_skb(&skb, team, pid, send_func); + err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) return err;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_OPTIONS_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE;
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; @@@ -2122,15 -1879,15 +2121,15 @@@ goto start_again;
send_done: - nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { - err = __send_and_alloc_skb(&skb, team, pid, send_func); + err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) goto errout; goto send_done; }
- return send_func(skb, team, pid); + return send_func(skb, team, portid);
nla_put_failure: err = -EMSGSIZE; @@@ -2153,7 -1910,7 +2152,7 @@@ static int team_nl_cmd_options_get(stru
list_for_each_entry(opt_inst, &team->option_inst_list, list) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); - err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq, + err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq, NLM_F_ACK, team_nl_send_unicast, &sel_opt_inst_list);
@@@ -2221,9 -1978,6 +2220,9 @@@ static int team_nl_cmd_options_set(stru case NLA_FLAG: opt_type = TEAM_OPTION_TYPE_BOOL; break; + case NLA_S32: + opt_type = TEAM_OPTION_TYPE_S32; + break; default: goto team_put; } @@@ -2280,9 -2034,6 +2279,9 @@@ case TEAM_OPTION_TYPE_BOOL: ctx.data.bool_val = attr_data ? true : false; break; + case TEAM_OPTION_TYPE_S32: + ctx.data.s32_val = nla_get_s32(attr_data); + break; default: BUG(); } @@@ -2307,7 -2058,7 +2306,7 @@@ team_put }
static int team_nl_fill_port_list_get(struct sk_buff *skb, - u32 pid, u32 seq, int flags, + u32 portid, u32 seq, int flags, struct team *team, bool fillall) { @@@ -2315,10 -2066,10 +2314,10 @@@ void *hdr; struct team_port *port;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags, TEAM_CMD_PORT_LIST_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE;
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; @@@ -2364,7 -2115,7 +2363,7 @@@ static int team_nl_fill_port_list_get_a struct genl_info *info, int flags, struct team *team) { - return team_nl_fill_port_list_get(skb, info->snd_pid, + return team_nl_fill_port_list_get(skb, info->snd_portid, info->snd_seq, NLM_F_ACK, team, true); } @@@ -2417,7 -2168,7 +2416,7 @@@ static struct genl_multicast_group team };
static int team_nl_send_multicast(struct sk_buff *skb, - struct team *team, u32 pid) + struct team *team, u32 portid) { return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, team_change_event_mcgrp.id, GFP_KERNEL); @@@ -2495,7 -2246,7 +2494,7 @@@ static void __team_options_change_check list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); } err = team_nl_send_event_options_get(team, &sel_opt_inst_list); - if (err) + if (err && err != -ESRCH) netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", err); } @@@ -2524,9 -2275,9 +2523,9 @@@ static void __team_port_change_send(str
send_event: err = team_nl_send_event_port_list_get(port->team); - if (err) - netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", - port->dev->name); + if (err && err != -ESRCH) + netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n", + port->dev->name, err);
}
diff --combined drivers/net/usb/asix_devices.c index 8d5fdf1,32e31c5..1df77f2 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@@ -221,8 -221,7 +221,8 @@@ static int ax88172_bind(struct usbnet * /* Get the MAC address */ ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("read AX_CMD_READ_NODE_ID failed: %d", ret); + netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n", + ret); goto out; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@@ -304,7 -303,7 +304,7 @@@ static int ax88772_reset(struct usbnet
ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); if (ret < 0) { - dbg("Select PHY #1 failed: %d", ret); + netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; }
@@@ -332,13 -331,13 +332,13 @@@
msleep(150); rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x after software reset", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl); ret = asix_write_rx_ctl(dev, 0x0000); if (ret < 0) goto out;
rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
ret = asix_sw_reset(dev, AX_SWRESET_PRL); if (ret < 0) @@@ -365,7 -364,7 +365,7 @@@ AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, AX88772_IPG2_DEFAULT, 0, NULL); if (ret < 0) { - dbg("Write IPG,IPG1,IPG2 failed: %d", ret); + netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret); goto out; }
@@@ -382,13 -381,10 +382,13 @@@ goto out;
rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x after all initializations", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n", + rx_ctl);
rx_ctl = asix_read_medium_status(dev); - dbg("Medium Status is 0x%04x after all initializations", rx_ctl); + netdev_dbg(dev->net, + "Medium Status is 0x%04x after all initializations\n", + rx_ctl);
return 0;
@@@ -420,7 -416,7 +420,7 @@@ static int ax88772_bind(struct usbnet * /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("Failed to read MAC address: %d", ret); + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@@ -443,7 -439,7 +443,7 @@@ /* Reset the PHY to normal operation mode */ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); if (ret < 0) { - dbg("Select PHY #1 failed: %d", ret); + netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); return ret; }
@@@ -463,7 -459,7 +463,7 @@@
/* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { @@@ -579,13 -575,13 +579,13 @@@ static int ax88178_reset(struct usbnet u32 phyid;
asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); - dbg("GPIO Status: 0x%04x", status); + netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL); asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom); asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
- dbg("EEPROM index 0x17 is 0x%04x", eeprom); + netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
if (eeprom == cpu_to_le16(0xffff)) { data->phymode = PHY_MODE_MARVELL; @@@ -596,7 -592,7 +596,7 @@@ data->ledmode = le16_to_cpu(eeprom) >> 8; gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; } - dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); + netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
/* Power up external GigaPHY through AX88178 GPIO pin */ asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); @@@ -605,14 -601,14 +605,14 @@@ asix_write_gpio(dev, 0x001c, 300); asix_write_gpio(dev, 0x003c, 30); } else { - dbg("gpio phymode == 1 path"); + netdev_dbg(dev->net, "gpio phymode == 1 path\n"); asix_write_gpio(dev, AX_GPIO_GPO1EN, 30); asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); }
/* Read PHYID register *AFTER* powering up PHY */ phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */ asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL); @@@ -774,7 -770,7 +774,7 @@@ static int ax88178_bind(struct usbnet * /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("Failed to read MAC address: %d", ret); + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@@ -966,6 -962,10 +966,10 @@@ static const struct usb_device_id produ USB_DEVICE (0x2001, 0x3c05), .driver_info = (unsigned long) &ax88772_info, }, { + // DLink DUB-E100 H/W Ver C1 + USB_DEVICE (0x2001, 0x1a02), + .driver_info = (unsigned long) &ax88772_info, + }, { // Linksys USB1000 USB_DEVICE (0x1737, 0x0039), .driver_info = (unsigned long) &ax88178_info, diff --combined drivers/net/usb/qmi_wwan.c index ca25320,3543c9e..6883c37 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@@ -108,7 -108,7 +108,7 @@@ static int qmi_wwan_register_subdriver( atomic_set(&info->pmcount, 0);
/* register subdriver */ - subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power); if (IS_ERR(subdriver)) { dev_err(&info->control->dev, "subdriver registration failed\n"); rv = PTR_ERR(subdriver); @@@ -139,18 -139,10 +139,18 @@@ static int qmi_wwan_bind(struct usbnet
BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
- /* require a single interrupt status endpoint for subdriver */ + /* control and data is shared? */ + if (intf->cur_altsetting->desc.bNumEndpoints == 3) { + info->control = intf; + info->data = intf; + goto shared; + } + + /* else require a single interrupt status endpoint on control intf */ if (intf->cur_altsetting->desc.bNumEndpoints != 1) goto err;
+ /* and a number of CDC descriptors */ while (len > 3) { struct usb_descriptor_header *h = (void *)buf;
@@@ -239,9 -231,8 +239,9 @@@ next_desc if (status < 0) goto err;
+shared: status = qmi_wwan_register_subdriver(dev); - if (status < 0) { + if (status < 0 && info->control != info->data) { usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver, info->data); } @@@ -250,6 -241,20 +250,6 @@@ err return status; }
-/* Some devices combine the "control" and "data" functions into a - * single interface with all three endpoints: interrupt + bulk in and - * out - */ -static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) -{ - struct qmi_wwan_state *info = (void *)&dev->data; - - /* control and data is shared */ - info->control = intf; - info->data = intf; - return qmi_wwan_register_subdriver(dev); -} - static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) { struct qmi_wwan_state *info = (void *)&dev->data; @@@ -326,12 -331,20 +326,12 @@@ static const struct driver_info qmi_wwa .manage_power = qmi_wwan_manage_power, };
-static const struct driver_info qmi_wwan_shared = { - .description = "WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, -}; - #define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */ #define QMI_FIXED_INTF(vend, prod, num) \ USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ - .driver_info = (unsigned long)&qmi_wwan_shared + .driver_info = (unsigned long)&qmi_wwan_info
/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ @@@ -353,21 -366,21 +353,21 @@@ static const struct usb_device_id produ },
/* 2. Combined interface devices matching on class+protocol */ - { /* Huawei E367 and possibly others in "Windows mode" */ + { /* Huawei E367 and possibly others in "Windows mode" */ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Huawei E392, E398 and possibly others in "Windows mode" */ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), - .driver_info = (unsigned long)&qmi_wwan_shared, + .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Pantech UML290, P4200 and more */ + { /* Pantech UML290, P4200 and more */ USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), - .driver_info = (unsigned long)&qmi_wwan_shared, + .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Pantech UML290 - newer firmware */ USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), - .driver_info = (unsigned long)&qmi_wwan_shared, + .driver_info = (unsigned long)&qmi_wwan_info, },
/* 3. Combined interface devices matching on interface number */ @@@ -454,7 -467,7 +454,7 @@@ static int qmi_wwan_probe(struct usb_in */ if (!id->driver_info) { dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); - id->driver_info = (unsigned long)&qmi_wwan_shared; + id->driver_info = (unsigned long)&qmi_wwan_info; }
return usbnet_probe(intf, id); diff --combined drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b5659cb,d066f25..884f9f0 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@@ -138,8 -138,7 +138,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -714,8 -713,7 +714,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -1291,8 -1289,7 +1291,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -1868,8 -1865,7 +1868,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -2444,8 -2440,7 +2444,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -2987,6 -2982,10 +2987,10 @@@ static u32 ath9k_hw_ar9300_get_eeprom(s case EEP_RX_MASK: return pBase->txrxMask & 0xf; case EEP_PAPRD: + if (AR_SREV_9462(ah)) + return false; + if (!ah->config.enable_paprd); + return false; return !!(pBase->featureEnable & BIT(5)); case EEP_CHAIN_MASK_REDUCE: return (pBase->miscConfiguration >> 0x3) & 0x1; @@@ -3525,7 -3524,7 +3529,7 @@@ static void ar9003_hw_xpa_bias_level_ap
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); - else if (AR_SREV_9462(ah) || AR_SREV_9550(ah)) + else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah)) REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); else { REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); @@@ -3573,7 -3572,7 +3577,7 @@@ static void ar9003_hw_ant_ctrl_apply(st
u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
- if (AR_SREV_9462(ah)) { + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_AR9462_ALL, value); } else if (AR_SREV_9550(ah)) { @@@ -3617,7 -3616,7 +3621,7 @@@ } }
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { + if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) { value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1); /* * main_lnaconf, alt_lnaconf, main_tb, alt_tb @@@ -3627,16 -3626,19 +3631,16 @@@ regval &= (~AR_ANT_DIV_CTRL_ALL); regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; /* enable_lnadiv */ - regval &= (~AR_PHY_9485_ANT_DIV_LNADIV); - regval |= ((value >> 6) & 0x1) << - AR_PHY_9485_ANT_DIV_LNADIV_S; + regval &= (~AR_PHY_ANT_DIV_LNADIV); + regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
/*enable fast_div */ regval = REG_READ(ah, AR_PHY_CCK_DETECT); regval &= (~AR_FAST_DIV_ENABLE); - regval |= ((value >> 7) & 0x1) << - AR_FAST_DIV_ENABLE_S; + regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); - ant_div_ctl1 = - ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); + ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); /* check whether antenna diversity is enabled */ if ((ant_div_ctl1 >> 0x6) == 0x3) { regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); @@@ -3644,15 -3646,15 +3648,15 @@@ * clear bits 25-30 main_lnaconf, alt_lnaconf, * main_tb, alt_tb */ - regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | - AR_PHY_9485_ANT_DIV_ALT_LNACONF | - AR_PHY_9485_ANT_DIV_ALT_GAINTB | - AR_PHY_9485_ANT_DIV_MAIN_GAINTB)); + regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF | + AR_PHY_ANT_DIV_ALT_LNACONF | + AR_PHY_ANT_DIV_ALT_GAINTB | + AR_PHY_ANT_DIV_MAIN_GAINTB)); /* by default use LNA1 for the main antenna */ - regval |= (AR_PHY_9485_ANT_DIV_LNA1 << - AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S); - regval |= (AR_PHY_9485_ANT_DIV_LNA2 << - AR_PHY_9485_ANT_DIV_ALT_LNACONF_S); + regval |= (AR_PHY_ANT_DIV_LNA1 << + AR_PHY_ANT_DIV_MAIN_LNACONF_S); + regval |= (AR_PHY_ANT_DIV_LNA2 << + AR_PHY_ANT_DIV_ALT_LNACONF_S); REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); }
@@@ -3845,7 -3847,7 +3849,7 @@@ void ar9003_hw_internal_regulator_apply REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) return; - } else if (AR_SREV_9462(ah)) { + } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { reg_val = le32_to_cpu(pBase->swreg); REG_WRITE(ah, AR_PHY_PMU1, reg_val); } else { @@@ -3876,7 -3878,7 +3880,7 @@@ while (!REG_READ_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM)) udelay(10); - } else if (AR_SREV_9462(ah)) + } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); else { reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) | @@@ -3979,62 -3981,6 +3983,62 @@@ static void ar9003_hw_xlna_bias_strengt bias & 0x3); }
+static int ar9003_hw_get_thermometer(struct ath_hw *ah) +{ + struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader; + int thermometer = (pBase->miscConfiguration >> 1) & 0x3; + + return --thermometer; +} + +static void ar9003_hw_thermometer_apply(struct ath_hw *ah) +{ + int thermometer = ar9003_hw_get_thermometer(ah); + u8 therm_on = (thermometer < 0) ? 0 : 1; + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); + if (ah->caps.tx_chainmask & BIT(1)) + REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); + if (ah->caps.tx_chainmask & BIT(2)) + REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); + + therm_on = (thermometer < 0) ? 0 : (thermometer == 0); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); + if (ah->caps.tx_chainmask & BIT(1)) { + therm_on = (thermometer < 0) ? 0 : (thermometer == 1); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); + } + if (ah->caps.tx_chainmask & BIT(2)) { + therm_on = (thermometer < 0) ? 0 : (thermometer == 2); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); + } +} + +static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) +{ + u32 data, ko, kg; + + if (!AR_SREV_9462_20(ah)) + return; + ar9300_otp_read_word(ah, 1, &data); + ko = data & 0xff; + kg = (data >> 8) & 0xff; + if (ko || kg) { + REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3, + AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko); + REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3, + AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN, + kg + 256); + } +} + static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { @@@ -4050,8 -3996,6 +4054,8 @@@ ar9003_hw_internal_regulator_apply(ah); ar9003_hw_apply_tuning_caps(ah); ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); + ar9003_hw_thermometer_apply(ah); + ar9003_hw_thermo_cal_apply(ah); }
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, @@@ -4588,7 -4532,7 +4592,7 @@@ static int ar9003_hw_power_control_over { int tempSlope = 0; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - int f[3], t[3]; + int f[8], t[8], i;
REG_RMW(ah, AR_PHY_TPC_11_B0, (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), @@@ -4621,14 -4565,7 +4625,14 @@@ */ if (frequency < 4000) tempSlope = eep->modalHeader2G.tempSlope; - else if (eep->base_ext2.tempSlopeLow != 0) { + else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) { + for (i = 0; i < 8; i++) { + t[i] = eep->base_ext1.tempslopextension[i]; + f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0); + } + tempSlope = ar9003_hw_power_interpolate((s32) frequency, + f, t, 8); + } else if (eep->base_ext2.tempSlopeLow != 0) { t[0] = eep->base_ext2.tempSlopeLow; f[0] = 5180; t[1] = eep->modalHeader5G.tempSlope; @@@ -4968,79 -4905,90 +4972,79 @@@ static void ar9003_hw_set_power_per_rat i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], chan->channel);
- /* - * compare test group from regulatory - * channel list with test mode from pCtlMode - * list - */ - if ((((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ctlIndex[i]) || - (((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ((ctlIndex[i] & CTL_MODE_M) | - SD_NO_CTL))) { - twiceMinEdgePower = - ar9003_hw_get_max_edge_power(pEepData, - freq, i, - is2ghz); - - if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) - /* - * Find the minimum of all CTL - * edge powers that apply to - * this channel - */ - twiceMaxEdgePower = - min(twiceMaxEdgePower, - twiceMinEdgePower); - else { - /* specific */ - twiceMaxEdgePower = - twiceMinEdgePower; - break; - } + /* + * compare test group from regulatory + * channel list with test mode from pCtlMode + * list + */ + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ctlIndex[i]) || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((ctlIndex[i] & CTL_MODE_M) | + SD_NO_CTL))) { + twiceMinEdgePower = + ar9003_hw_get_max_edge_power(pEepData, + freq, i, + is2ghz); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) + /* + * Find the minimum of all CTL + * edge powers that apply to + * this channel + */ + twiceMaxEdgePower = + min(twiceMaxEdgePower, + twiceMinEdgePower); + else { + /* specific */ + twiceMaxEdgePower = twiceMinEdgePower; + break; } } + }
- minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_dbg(common, REGULATORY, - "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", - ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, - scaledPower, minCtlPower); - - /* Apply ctl mode to correct target power set */ - switch (pCtlMode[ctlMode]) { - case CTL_11B: - for (i = ALL_TARGET_LEGACY_1L_5L; - i <= ALL_TARGET_LEGACY_11S; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - case CTL_11A: - case CTL_11G: - for (i = ALL_TARGET_LEGACY_6_24; - i <= ALL_TARGET_LEGACY_54; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - case CTL_5GHT20: - case CTL_2GHT20: - for (i = ALL_TARGET_HT20_0_8_16; - i <= ALL_TARGET_HT20_21; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - pPwrArray[ALL_TARGET_HT20_22] = - (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], - minCtlPower); - pPwrArray[ALL_TARGET_HT20_23] = - (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], - minCtlPower); - break; - case CTL_5GHT40: - case CTL_2GHT40: - for (i = ALL_TARGET_HT40_0_8_16; - i <= ALL_TARGET_HT40_23; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - default: - break; - } + ath_dbg(common, REGULATORY, + "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, + scaledPower, minCtlPower); + + /* Apply ctl mode to correct target power set */ + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = ALL_TARGET_LEGACY_1L_5L; + i <= ALL_TARGET_LEGACY_11S; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_11A: + case CTL_11G: + for (i = ALL_TARGET_LEGACY_6_24; + i <= ALL_TARGET_LEGACY_54; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = ALL_TARGET_HT20_0_8_16; + i <= ALL_TARGET_HT20_23; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = ALL_TARGET_HT40_0_8_16; + i <= ALL_TARGET_HT40_23; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + default: + break; + } } /* end ctl mode checking */ }
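Every arm of the switch above performs the same clamp: each target power in the rate group that matches the active CTL mode is limited to min(target, minCtlPower). Reduced to its essentials (the array contents and group bounds are made up for the example):

#include <stdint.h>
#include <stdio.h>

/* clamp targets[first..last] to the regulatory cap, as each case does */
static void cap_power_group(uint8_t *targets, int first, int last,
			    uint8_t min_ctl_power)
{
	int i;

	for (i = first; i <= last; i++)
		if (targets[i] > min_ctl_power)
			targets[i] = min_ctl_power;
}

int main(void)
{
	uint8_t pwr[4] = { 30, 28, 26, 24 };	/* in half-dB steps */

	cap_power_group(pwr, 0, 3, 25);
	printf("%u %u %u %u\n", pwr[0], pwr[1], pwr[2], pwr[3]); /* 25 25 25 24 */
	return 0;
}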
diff --combined drivers/net/wireless/ath/ath9k/debug.c index ab3bc85,c8ef301..e1041a6 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@@ -373,8 -373,6 +373,8 @@@ void ath_debug_stat_interrupt(struct at sc->debug.stats.istats.tsfoor++; if (status & ATH9K_INT_MCI) sc->debug.stats.istats.mci++; + if (status & ATH9K_INT_GENTIMER) + sc->debug.stats.istats.gen_timer++; }
static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, @@@ -420,7 -418,6 +420,7 @@@ PR_IS("DTIM", dtim); PR_IS("TSFOOR", tsfoor); PR_IS("MCI", mci); + PR_IS("GENTIMER", gen_timer); PR_IS("TOTAL", total);
len += snprintf(buf + len, mxlen - len, @@@ -1580,6 -1577,8 +1580,8 @@@ int ath9k_init_debug(struct ath_hw *ah sc->debug.debugfs_phy, sc, &fops_tx_chainmask); debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_disable_ani); + debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + &sc->sc_ah->config.enable_paprd); debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_regidx); debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, diff --combined drivers/net/wireless/ath/ath9k/hw.c index 99cab44,4faf0a3..40f57aa --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@@ -355,7 -355,7 +355,7 @@@ static void ath9k_hw_read_revisions(str (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
- if (AR_SREV_9462(ah)) + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) ah->is_pciexpress = true; else ah->is_pciexpress = (val & @@@ -602,11 -602,6 +602,11 @@@ static int __ath9k_hw_init(struct ath_h if (AR_SREV_9462(ah)) ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
+ if (AR_SREV_9565(ah)) { + ah->WARegVal |= AR_WA_BIT22; + REG_WRITE(ah, AR_WA, ah->WARegVal); + } + ath9k_hw_init_defaults(ah); ath9k_hw_init_config(ah);
@@@ -652,7 -647,6 +652,7 @@@ case AR_SREV_VERSION_9340: case AR_SREV_VERSION_9462: case AR_SREV_VERSION_9550: + case AR_SREV_VERSION_9565: break; default: ath_err(common, @@@ -714,7 -708,7 +714,7 @@@ int ath9k_hw_init(struct ath_hw *ah int ret; struct ath_common *common = ath9k_hw_common(ah);
- /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */ + /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */ switch (ah->hw_version.devid) { case AR5416_DEVID_PCI: case AR5416_DEVID_PCIE: @@@ -734,7 -728,6 +734,7 @@@ case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: case AR9485_DEVID_AR1111: + case AR9300_DEVID_AR9565: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) @@@ -807,7 -800,8 +807,7 @@@ static void ath9k_hw_init_pll(struct at { u32 pll;
- if (AR_SREV_9485(ah)) { - + if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) { /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_PLL_PWD, 0x1); @@@ -918,8 -912,7 +918,8 @@@ }
pll = ath9k_hw_compute_pll_control(ah, chan); - + if (AR_SREV_9565(ah)) + pll |= 0x40000; REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || @@@ -2041,7 -2034,7 +2041,7 @@@ static void ath9k_set_power_sleep(struc { REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (AR_SREV_9462(ah)) { + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff); REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff); REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff); @@@ -2408,10 -2401,7 +2408,10 @@@ int ath9k_hw_fill_cap_info(struct ath_h if (eeval & AR5416_OPFLAGS_11G) pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
- if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) + if (AR_SREV_9485(ah) || + AR_SREV_9285(ah) || + AR_SREV_9330(ah) || + AR_SREV_9565(ah)) chip_chainmask = 1; else if (AR_SREV_9462(ah)) chip_chainmask = 3; @@@ -2499,7 -2489,7 +2499,7 @@@
if (AR_SREV_9300_20_OR_LATER(ah)) { pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK; - if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah)) + if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah)) pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; @@@ -2507,10 -2497,6 +2507,6 @@@ pCap->rx_status_len = sizeof(struct ar9003_rxs); pCap->tx_desc_len = sizeof(struct ar9003_txc); pCap->txs_len = sizeof(struct ar9003_txs); - if (!ah->config.paprd_disable && - ah->eep_ops->get_eeprom(ah, EEP_PAPRD) && - !AR_SREV_9462(ah)) - pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; } else { pCap->tx_desc_len = sizeof(struct ath_desc); if (AR_SREV_9280_20(ah)) @@@ -2582,12 -2568,14 +2578,12 @@@ ah->enabled_cals |= TX_IQ_ON_AGC_CAL; }
- if (AR_SREV_9462(ah)) { - + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) pCap->hw_caps |= ATH9K_HW_CAP_MCI;
if (AR_SREV_9462_20(ah)) pCap->hw_caps |= ATH9K_HW_CAP_RTT; - }
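chip_chainmask in the capability hunk above is a bitmask of the chains the silicon actually has: 1 for single-chain parts such as AR9485 and AR9565, 3 for the two-chain AR9462, 7 otherwise. A configured chainmask is only plausible if it is a non-empty subset of that, which is a two-line check (a sketch; the driver's own validation lives elsewhere):

#include <stdbool.h>
#include <stdio.h>

static bool chainmask_valid(unsigned int cfg_mask, unsigned int chip_mask)
{
	/* every chain the configuration enables must exist on the chip */
	return cfg_mask != 0 && (cfg_mask & ~chip_mask) == 0;
}

int main(void)
{
	printf("%d\n", chainmask_valid(0x1, 0x1));	/* 1: chain 0 on a 1x1 chip */
	printf("%d\n", chainmask_valid(0x3, 0x1));	/* 0: chain 1 does not exist */
	return 0;
}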
@@@ -2753,7 -2741,7 +2749,7 @@@ void ath9k_hw_setrxfilter(struct ath_h
ENABLE_REGWRITE_BUFFER(ah);
- if (AR_SREV_9462(ah)) + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
REG_WRITE(ah, AR_RX_FILTER, bits); @@@ -3050,7 -3038,7 +3046,7 @@@ void ath9k_hw_gen_timer_start(struct at REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, gen_tmr_configuration[timer->index].mode_mask);
- if (AR_SREV_9462(ah)) { + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { /* * Starting from AR9462, each generic timer can select which tsf * to use. But we still follow the old rule, 0 - 7 use tsf and @@@ -3084,16 -3072,6 +3080,16 @@@ void ath9k_hw_gen_timer_stop(struct ath REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, gen_tmr_configuration[timer->index].mode_mask);
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { + /* + * Need to switch back to TSF if it was using TSF2. + */ + if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) { + REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, + (1 << timer->index)); + } + } + /* Disable both trigger and thresh interrupt masks */ REG_CLR_BIT(ah, AR_IMR_S5, (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | @@@ -3175,7 -3153,6 +3171,7 @@@ static struct { AR_SREV_VERSION_9485, "9485" }, { AR_SREV_VERSION_9462, "9462" }, { AR_SREV_VERSION_9550, "9550" }, + { AR_SREV_VERSION_9565, "9565" }, };
/* For devices with external radios */ diff --combined drivers/net/wireless/ath/ath9k/hw.h index 0d17ce0,de6968f..f0798cc --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@@ -50,7 -50,6 +50,7 @@@ #define AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 #define AR9485_DEVID_AR1111 0x0037 +#define AR9300_DEVID_AR9565 0x0036
#define AR5416_AR9100_DEVID 0x000b
@@@ -237,7 -236,6 +237,6 @@@ enum ath9k_hw_caps ATH9K_HW_CAP_LDPC = BIT(6), ATH9K_HW_CAP_FASTCLOCK = BIT(7), ATH9K_HW_CAP_SGI_20 = BIT(8), - ATH9K_HW_CAP_PAPRD = BIT(9), ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10), ATH9K_HW_CAP_2GHZ = BIT(11), ATH9K_HW_CAP_5GHZ = BIT(12), @@@ -288,12 -286,12 +287,12 @@@ struct ath9k_ops_config u8 pcie_clock_req; u32 pcie_waen; u8 analog_shiftreg; - u8 paprd_disable; u32 ofdm_trig_low; u32 ofdm_trig_high; u32 cck_trig_high; u32 cck_trig_low; u32 enable_ani; + u32 enable_paprd; int serialize_regmode; bool rx_intr_mitigation; bool tx_intr_mitigation; diff --combined drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index e0b313c,7c4ee72..372f74e --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@@ -42,7 -42,6 +42,7 @@@
#define DMA_ALIGN_MASK 0x03
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 @@@ -52,7 -51,6 +52,7 @@@
/* devices we support, null terminated */ static const struct sdio_device_id brcmf_sdmmc_ids[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)}, @@@ -640,6 -638,8 +640,8 @@@ static int brcmf_sdio_pd_probe(struct p
oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq), GFP_KERNEL); + if (!oobirq_entry) + return -ENOMEM; oobirq_entry->irq = res->start; oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK; list_add_tail(&oobirq_entry->list, &oobirq_lh); diff --combined drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index f6b862d,6f70953..8121dba --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@@ -205,8 -205,7 +205,8 @@@ brcmf_c_show_host_event(struct brcmf_ev BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, { BRCMF_E_IF, "IF"}, { BRCMF_E_RSSI, "RSSI"}, { - BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"} + BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, { + BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"} }; uint event_type, flags, auth_type, datalen; static u32 seqnum_prev; @@@ -351,11 -350,6 +351,11 @@@ brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name); break;
+ case BRCMF_E_ESCAN_RESULT: + brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name); + datalen = 0; + break; + case BRCMF_E_PFN_NET_FOUND: case BRCMF_E_PFN_NET_LOST: case BRCMF_E_PFN_SCAN_COMPLETE: @@@ -770,8 -764,11 +770,11 @@@ static void brcmf_c_arp_offload_set(str { char iovbuf[32]; int retcode; + __le32 arp_mode_le;
- brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); + arp_mode_le = cpu_to_le32(arp_mode); + brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf, + sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); retcode = retcode >= 0 ? 0 : retcode; @@@ -787,8 -784,11 +790,11 @@@ static void brcmf_c_arp_offload_enable( { char iovbuf[32]; int retcode; + __le32 arp_enable_le;
- brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4, + arp_enable_le = cpu_to_le32(arp_enable); + + brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4, iovbuf, sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@@ -806,10 -806,10 +812,10 @@@ int brcmf_c_preinit_dcmds(struct brcmf_ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ char buf[128], *ptr; - u32 roaming = 1; - uint bcn_timeout = 3; - int scan_assoc_time = 40; - int scan_unassoc_time = 40; + __le32 roaming_le = cpu_to_le32(1); + __le32 bcn_timeout_le = cpu_to_le32(3); + __le32 scan_assoc_time_le = cpu_to_le32(40); + __le32 scan_unassoc_time_le = cpu_to_le32(40); int i; struct brcmf_bus_dcmd *cmdlst; struct list_head *cur, *q; @@@ -835,14 -835,14 +841,14 @@@
/* Set up a timeout to report link down if beacons are lost and roaming is off */ - brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, + brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
/* Enable/disable built-in roaming so an external supplicant can take care of roaming */ - brcmf_c_mkiovar("roam_off", (char *)&roaming, 4, + brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@@ -854,9 -854,9 +860,9 @@@ sizeof(iovbuf));
brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME, - (char *)&scan_assoc_time, sizeof(scan_assoc_time)); + (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME, - (char *)&scan_unassoc_time, sizeof(scan_unassoc_time)); + (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
/* Set and enable ARP offload feature */ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE); diff --combined drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 65cf8f9,50b5553..32ee052 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@@ -28,7 -28,6 +28,7 @@@ #include <linux/ieee80211.h> #include <linux/uaccess.h> #include <net/cfg80211.h> +#include <net/netlink.h>
#include <brcmu_utils.h> #include <defs.h> @@@ -490,8 -489,8 +490,8 @@@ static void brcmf_set_mpc(struct net_de } }
-static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, - struct brcmf_ssid *ssid) +static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le, + struct brcmf_ssid *ssid) { memcpy(params_le->bssid, ether_bcast, ETH_ALEN); params_le->bss_type = DOT11_BSSTYPE_ANY; @@@ -501,8 -500,10 +501,10 @@@ params_le->active_time = cpu_to_le32(-1); params_le->passive_time = cpu_to_le32(-1); params_le->home_time = cpu_to_le32(-1); - if (ssid && ssid->SSID_len) - memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid)); + if (ssid && ssid->SSID_len) { + params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len); + memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len); + } }
static s32 @@@ -545,7 -546,7 +547,7 @@@ brcmf_run_iscan(struct brcmf_cfg80211_i return -ENOMEM; BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
- wl_iscan_prep(&params->params_le, ssid); + brcmf_iscan_prep(&params->params_le, ssid);
params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION); params->action = cpu_to_le16(action); @@@ -598,9 -599,9 +600,9 @@@ static s32 brcmf_do_iscan(struct brcmf_ }
static s32 -__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_scan_request *request, - struct cfg80211_ssid *this_ssid) +brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct cfg80211_ssid *ssids; @@@ -691,342 -692,11 +693,342 @@@ scan_out return err; }
+static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le, + struct cfg80211_scan_request *request) +{ + u32 n_ssids; + u32 n_channels; + s32 i; + s32 offset; + __le16 chanspec; + u16 channel; + struct ieee80211_channel *req_channel; + char *ptr; + struct brcmf_ssid ssid; + + memcpy(params_le->bssid, ether_bcast, ETH_ALEN); + params_le->bss_type = DOT11_BSSTYPE_ANY; + params_le->scan_type = 0; + params_le->channel_num = 0; + params_le->nprobes = cpu_to_le32(-1); + params_le->active_time = cpu_to_le32(-1); + params_le->passive_time = cpu_to_le32(-1); + params_le->home_time = cpu_to_le32(-1); + memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le)); + + /* if request is null exit so it will be all channel broadcast scan */ + if (!request) + return; + + n_ssids = request->n_ssids; + n_channels = request->n_channels; + /* Copy channel array if applicable */ + WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels); + if (n_channels > 0) { + for (i = 0; i < n_channels; i++) { + chanspec = 0; + req_channel = request->channels[i]; + channel = ieee80211_frequency_to_channel( + req_channel->center_freq); + if (req_channel->band == IEEE80211_BAND_2GHZ) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + if (req_channel->flags & IEEE80211_CHAN_NO_HT40) { + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + } else { + chanspec |= WL_CHANSPEC_BW_40; + if (req_channel->flags & + IEEE80211_CHAN_NO_HT40PLUS) + chanspec |= WL_CHANSPEC_CTL_SB_LOWER; + else + chanspec |= WL_CHANSPEC_CTL_SB_UPPER; + } + + params_le->channel_list[i] = + (channel & WL_CHANSPEC_CHAN_MASK) | + chanspec; + WL_SCAN("Chan : %d, Channel spec: %x\n", + channel, params_le->channel_list[i]); + params_le->channel_list[i] = + cpu_to_le16(params_le->channel_list[i]); + } + } else { + WL_SCAN("Scanning all channels\n"); + } + /* Copy ssid array if applicable */ + WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids); + if (n_ssids > 0) { + offset = offsetof(struct brcmf_scan_params_le, channel_list) + + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + ptr = (char *)params_le + offset; + for (i = 0; i < n_ssids; i++) { + memset(&ssid, 0, sizeof(ssid)); + ssid.SSID_len = cpu_to_le32(request->ssids[i].ssid_len); + memcpy(ssid.SSID, request->ssids[i].ssid, + request->ssids[i].ssid_len); + if (!ssid.SSID_len) + WL_SCAN("%d: Broadcast scan\n", i); + else + WL_SCAN("%d: scan for %s size =%d\n", i, + ssid.SSID, ssid.SSID_len); + memcpy(ptr, &ssid, sizeof(ssid)); + ptr += sizeof(ssid); + } + } else { + WL_SCAN("Broadcast scan %p\n", request->ssids); + if ((request->ssids) && request->ssids->ssid_len) { + WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID, + request->ssids->ssid_len); + params_le->ssid_le.SSID_len = + cpu_to_le32(request->ssids->ssid_len); + memcpy(&params_le->ssid_le.SSID, request->ssids->ssid, + request->ssids->ssid_len); + } + } + /* Adding mask to channel numbers */ + params_le->channel_num = + cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK)); +} + +static s32 +brcmf_notify_escan_complete(struct brcmf_cfg80211_priv *cfg_priv, + struct net_device *ndev, + bool aborted, bool fw_abort) +{ + struct brcmf_scan_params_le params_le; + struct cfg80211_scan_request *scan_request; + s32 err = 0; + + WL_SCAN("Enter\n"); + + /* clear scan request, because the FW abort can cause a second call */ + /* to this function and might cause a double cfg80211_scan_done */ + scan_request = cfg_priv->scan_request; 
+ cfg_priv->scan_request = NULL; + + if (timer_pending(&cfg_priv->escan_timeout)) + del_timer_sync(&cfg_priv->escan_timeout); + + if (fw_abort) { + /* Do a scan abort to stop the driver's scan engine */ + WL_SCAN("ABORT scan in firmware\n"); + memset(&params_le, 0, sizeof(params_le)); + memcpy(params_le.bssid, ether_bcast, ETH_ALEN); + params_le.bss_type = DOT11_BSSTYPE_ANY; + params_le.scan_type = 0; + params_le.channel_num = cpu_to_le32(1); + params_le.nprobes = cpu_to_le32(1); + params_le.active_time = cpu_to_le32(-1); + params_le.passive_time = cpu_to_le32(-1); + params_le.home_time = cpu_to_le32(-1); + /* Scan is aborted by setting channel_list[0] to -1 */ + params_le.channel_list[0] = cpu_to_le16(-1); + /* E-Scan (or any other type) can be aborted by SCAN */ + err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le, + sizeof(params_le)); + if (err) + WL_ERR("Scan abort failed\n"); + } + if (scan_request) { + WL_SCAN("ESCAN Completed scan: %s\n", + aborted ? "Aborted" : "Done"); + cfg80211_scan_done(scan_request, aborted); + brcmf_set_mpc(ndev, 1); + } + if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { + WL_ERR("Scan complete while device not scanning\n"); + return -EPERM; + } + + return err; +} + +static s32 +brcmf_run_escan(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, + struct cfg80211_scan_request *request, u16 action) +{ + s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + + offsetof(struct brcmf_escan_params_le, params_le); + struct brcmf_escan_params_le *params; + s32 err = 0; + + WL_SCAN("E-SCAN START\n"); + + if (request != NULL) { + /* Allocate space for populating channels in struct */ + params_size += sizeof(u32) * ((request->n_channels + 1) / 2); + + /* Allocate space for populating ssids in struct */ + params_size += sizeof(struct brcmf_ssid) * request->n_ssids; + } + + params = kzalloc(params_size, GFP_KERNEL); + if (!params) { + err = -ENOMEM; + goto exit; + } + BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN); + brcmf_escan_prep(&params->params_le, request); + params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); + params->action = cpu_to_le16(action); + params->sync_id = cpu_to_le16(0x1234); + + err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size, + cfg_priv->escan_ioctl_buf, BRCMF_DCMD_MEDLEN); + if (err) { + if (err == -EBUSY) + WL_INFO("system busy : escan canceled\n"); + else + WL_ERR("error (%d)\n", err); + } + + kfree(params); +exit: + return err; +} + +static s32 +brcmf_do_escan(struct brcmf_cfg80211_priv *cfg_priv, struct wiphy *wiphy, + struct net_device *ndev, struct cfg80211_scan_request *request) +{ + s32 err; + __le32 passive_scan; + struct brcmf_scan_results *results; + + WL_SCAN("Enter\n"); + cfg_priv->escan_info.ndev = ndev; + cfg_priv->escan_info.wiphy = wiphy; + cfg_priv->escan_info.escan_state = WL_ESCAN_STATE_SCANNING; + passive_scan = cfg_priv->active_scan ? 
0 : cpu_to_le32(1); + err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan)); + if (err) { + WL_ERR("error (%d)\n", err); + return err; + } + brcmf_set_mpc(ndev, 0); + results = (struct brcmf_scan_results *)cfg_priv->escan_info.escan_buf; + results->version = 0; + results->count = 0; + results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE; + + err = brcmf_run_escan(cfg_priv, ndev, request, WL_ESCAN_ACTION_START); + if (err) + brcmf_set_mpc(ndev, 1); + return err; +} + +static s32 +brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) +{ + struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); + struct cfg80211_ssid *ssids; + struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int; + __le32 passive_scan; + bool escan_req; + bool spec_scan; + s32 err; + u32 SSID_len; + + WL_SCAN("START ESCAN\n"); + + if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { + WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status); + return -EAGAIN; + } + if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) { + WL_ERR("Scanning being aborted : status (%lu)\n", + cfg_priv->status); + return -EAGAIN; + } + if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { + WL_ERR("Connecting : status (%lu)\n", + cfg_priv->status); + return -EAGAIN; + } + + /* Arm scan timeout timer */ + mod_timer(&cfg_priv->escan_timeout, jiffies + + WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); + + escan_req = false; + if (request) { + /* scan bss */ + ssids = request->ssids; + escan_req = true; + } else { + /* scan in ibss */ + /* we don't do escan in ibss */ + ssids = this_ssid; + } + + cfg_priv->scan_request = request; + set_bit(WL_STATUS_SCANNING, &cfg_priv->status); + if (escan_req) { + err = brcmf_do_escan(cfg_priv, wiphy, ndev, request); + if (!err) + return err; + else + goto scan_out; + } else { + WL_SCAN("ssid \"%s\", ssid_len (%d)\n", + ssids->ssid, ssids->ssid_len); + memset(&sr->ssid_le, 0, sizeof(sr->ssid_le)); + SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len); + sr->ssid_le.SSID_len = cpu_to_le32(0); + spec_scan = false; + if (SSID_len) { + memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len); + sr->ssid_le.SSID_len = cpu_to_le32(SSID_len); + spec_scan = true; + } else + WL_SCAN("Broadcast scan\n"); + + passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); + err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan)); + if (err) { + WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err); + goto scan_out; + } + brcmf_set_mpc(ndev, 0); + err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le, + sizeof(sr->ssid_le)); + if (err) { + if (err == -EBUSY) + WL_INFO("BUSY: scan for \"%s\" canceled\n", + sr->ssid_le.SSID); + else + WL_ERR("WLC_SCAN error (%d)\n", err); + + brcmf_set_mpc(ndev, 1); + goto scan_out; + } + } + + return 0; + +scan_out: + clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); + if (timer_pending(&cfg_priv->escan_timeout)) + del_timer_sync(&cfg_priv->escan_timeout); + cfg_priv->scan_request = NULL; + return err; +} + static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct net_device *ndev = request->wdev->netdev; + struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); s32 err = 0;
WL_TRACE("Enter\n"); @@@ -1034,11 -704,7 +1036,11 @@@ if (!check_sys_up(wiphy)) return -EIO;
- err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL); + if (cfg_priv->iscan_on) + err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL); + else if (cfg_priv->escan_on) + err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL); + if (err) WL_ERR("scan error (%d)\n", err);
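With the dispatch above in place, the escan path completes asynchronously: firmware streams one BRCMF_E_ESCAN_RESULT event per BSS, and the handler added further below either merges the sample into an already-known BSS or appends a new entry. A simplified sketch of the merge intent described in brcmf_compare_update_same_bss()'s comments — keep the maximum RSSI when both samples are of the same kind, and prefer an on-channel measurement over an off-channel one (the same-BSS identity test is elided here):

#include <stdbool.h>
#include <stdio.h>

#define RSSI_ON_CHANNEL 0x1	/* stand-in for WLC_BSS_RSSI_ON_CHANNEL */

struct bss_sample {
	int rssi;
	unsigned int flags;
};

static void merge_rssi(struct bss_sample *old, const struct bss_sample *new)
{
	bool old_on = old->flags & RSSI_ON_CHANNEL;
	bool new_on = new->flags & RSSI_ON_CHANNEL;

	if (old_on == new_on) {
		if (new->rssi > old->rssi)	/* both on- or both off-channel: */
			old->rssi = new->rssi;	/* keep the maximum */
	} else if (new_on) {
		old->rssi = new->rssi;		/* on-channel beats off-channel */
		old->flags |= RSSI_ON_CHANNEL;
	}					/* else keep the on-channel value */
}

int main(void)
{
	struct bss_sample seen = { .rssi = -60, .flags = 0 };
	struct bss_sample fresh = { .rssi = -70, .flags = RSSI_ON_CHANNEL };

	merge_rssi(&seen, &fresh);
	printf("%d\n", seen.rssi);	/* -70: the on-channel sample wins */
	return 0;
}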
@@@ -2807,175 -2473,6 +2809,175 @@@ static s32 brcmf_init_iscan(struct brcm return err; }
+static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work) +{ + struct brcmf_cfg80211_priv *cfg_priv = + container_of(work, struct brcmf_cfg80211_priv, + escan_timeout_work); + + brcmf_notify_escan_complete(cfg_priv, + cfg_priv->escan_info.ndev, true, true); +} + +static void brcmf_escan_timeout(unsigned long data) +{ + struct brcmf_cfg80211_priv *cfg_priv = + (struct brcmf_cfg80211_priv *)data; + + if (cfg_priv->scan_request) { + WL_ERR("timer expired\n"); + if (cfg_priv->escan_on) + schedule_work(&cfg_priv->escan_timeout_work); + } +} + +static s32 +brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss, + struct brcmf_bss_info_le *bss_info_le) +{ + if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) && + (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) == + CHSPEC_BAND(le16_to_cpu(bss->chanspec))) && + bss_info_le->SSID_len == bss->SSID_len && + !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) { + if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) == + (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) { + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + if (bss_info_le->RSSI > bss->RSSI) + bss->RSSI = bss_info_le->RSSI; + } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) && + (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + bss->RSSI = bss_info_le->RSSI; + bss->flags |= WLC_BSS_RSSI_ON_CHANNEL; + } + return 1; + } + return 0; +} + +static s32 +brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_priv *cfg_priv, + struct net_device *ndev, + const struct brcmf_event_msg *e, void *data) +{ + s32 status; + s32 err = 0; + struct brcmf_escan_result_le *escan_result_le; + struct brcmf_bss_info_le *bss_info_le; + struct brcmf_bss_info_le *bss = NULL; + u32 bi_length; + struct brcmf_scan_results *list; + u32 i; + + status = be32_to_cpu(e->status); + + if (!ndev || !cfg_priv->escan_on || + !test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { + WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n", + ndev, cfg_priv->escan_on, + !test_bit(WL_STATUS_SCANNING, &cfg_priv->status)); + return -EPERM; + } + + if (status == BRCMF_E_STATUS_PARTIAL) { + WL_SCAN("ESCAN Partial result\n"); + escan_result_le = (struct brcmf_escan_result_le *) data; + if (!escan_result_le) { + WL_ERR("Invalid escan result (NULL pointer)\n"); + goto exit; + } + if (!cfg_priv->scan_request) { + WL_SCAN("result without cfg80211 request\n"); + goto exit; + } + + if (le16_to_cpu(escan_result_le->bss_count) != 1) { + WL_ERR("Invalid bss_count %d: ignoring\n", + escan_result_le->bss_count); + goto exit; + } + bss_info_le = &escan_result_le->bss_info_le; + + bi_length = le32_to_cpu(bss_info_le->length); + if (bi_length != (le32_to_cpu(escan_result_le->buflen) - + WL_ESCAN_RESULTS_FIXED_SIZE)) { + WL_ERR("Invalid bss_info length %d: ignoring\n", + bi_length); + goto exit; + } + + if (!(cfg_to_wiphy(cfg_priv)->interface_modes & + BIT(NL80211_IFTYPE_ADHOC))) { + if (le16_to_cpu(bss_info_le->capability) & + WLAN_CAPABILITY_IBSS) { + WL_ERR("Ignoring IBSS result\n"); + goto exit; + } + } + + list = (struct brcmf_scan_results *) + cfg_priv->escan_info.escan_buf; + if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) { + WL_ERR("Buffer is too small: ignoring\n"); + goto exit; + } + + for (i = 0; i < list->count; i++) { + bss = bss ? 
(struct brcmf_bss_info_le *) + ((unsigned char *)bss + + le32_to_cpu(bss->length)) : list->bss_info_le; + if (brcmf_compare_update_same_bss(bss, bss_info_le)) + goto exit; + } + memcpy(&(cfg_priv->escan_info.escan_buf[list->buflen]), + bss_info_le, bi_length); + list->version = le32_to_cpu(bss_info_le->version); + list->buflen += bi_length; + list->count++; + } else { + cfg_priv->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (cfg_priv->scan_request) { + cfg_priv->bss_list = (struct brcmf_scan_results *) + cfg_priv->escan_info.escan_buf; + brcmf_inform_bss(cfg_priv); + if (status == BRCMF_E_STATUS_SUCCESS) { + WL_SCAN("ESCAN Completed\n"); + brcmf_notify_escan_complete(cfg_priv, ndev, + false, false); + } else { + WL_ERR("ESCAN Aborted, Event 0x%x\n", status); + brcmf_notify_escan_complete(cfg_priv, ndev, + true, false); + } + brcmf_set_mpc(ndev, 1); + } else + WL_ERR("Unexpected scan result 0x%x\n", status); + } +exit: + return err; +} + +static void brcmf_init_escan(struct brcmf_cfg80211_priv *cfg_priv) +{ + + if (cfg_priv->escan_on) { + cfg_priv->el.handler[BRCMF_E_ESCAN_RESULT] = + brcmf_cfg80211_escan_handler; + cfg_priv->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + /* Init scan_timeout timer */ + init_timer(&cfg_priv->escan_timeout); + cfg_priv->escan_timeout.data = (unsigned long) cfg_priv; + cfg_priv->escan_timeout.function = brcmf_escan_timeout; + INIT_WORK(&cfg_priv->escan_timeout_work, + brcmf_cfg80211_escan_timeout_worker); + } +} + static __always_inline void brcmf_delay(u32 ms) { if (ms < 1000 / HZ) { @@@ -3051,8 -2548,10 +3053,8 @@@ static s32 brcmf_cfg80211_suspend(struc clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
/* Turn off watchdog timer */ - if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { - WL_INFO("Enable MPC\n"); + if (test_bit(WL_STATUS_READY, &cfg_priv->status)) brcmf_set_mpc(ndev, 1); - }
WL_TRACE("Exit\n");
@@@ -3227,25 -2726,6 +3229,25 @@@ brcmf_cfg80211_flush_pmksa(struct wiph
}
+#ifdef CONFIG_NL80211_TESTMODE +static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len) +{ + struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_priv->wdev->netdev; + struct brcmf_dcmd *dcmd = data; + struct sk_buff *reply; + int ret; + + ret = brcmf_netlink_dcmd(ndev, dcmd); + if (ret == 0) { + reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd)); + nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd); + ret = cfg80211_testmode_reply(reply); + } + return ret; +} +#endif + static struct cfg80211_ops wl_cfg80211_ops = { .change_virtual_intf = brcmf_cfg80211_change_iface, .scan = brcmf_cfg80211_scan, @@@ -3268,10 -2748,7 +3270,10 @@@ .resume = brcmf_cfg80211_resume, .set_pmksa = brcmf_cfg80211_set_pmksa, .del_pmksa = brcmf_cfg80211_del_pmksa, - .flush_pmksa = brcmf_cfg80211_flush_pmksa + .flush_pmksa = brcmf_cfg80211_flush_pmksa, +#ifdef CONFIG_NL80211_TESTMODE + .testmode_cmd = brcmf_cfg80211_testmode +#endif };
static s32 brcmf_mode_to_nl80211_iftype(s32 mode) @@@ -3696,8 -3173,10 +3698,8 @@@ brcmf_notify_scan_status(struct brcmf_c cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count);
err = brcmf_inform_bss(cfg_priv); - if (err) { + if (err) scan_abort = true; - goto scan_done_out; - }
scan_done_out: if (cfg_priv->scan_request) { @@@ -3744,8 -3223,6 +3746,8 @@@ static void brcmf_deinit_priv_mem(struc cfg_priv->profile = NULL; kfree(cfg_priv->scan_req_int); cfg_priv->scan_req_int = NULL; + kfree(cfg_priv->escan_ioctl_buf); + cfg_priv->escan_ioctl_buf = NULL; kfree(cfg_priv->dcmd_buf); cfg_priv->dcmd_buf = NULL; kfree(cfg_priv->extra_buf); @@@ -3774,9 -3251,6 +3776,9 @@@ static s32 brcmf_init_priv_mem(struct b GFP_KERNEL); if (!cfg_priv->scan_req_int) goto init_priv_mem_out; + cfg_priv->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); + if (!cfg_priv->escan_ioctl_buf) + goto init_priv_mem_out; cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL); if (!cfg_priv->dcmd_buf) goto init_priv_mem_out; @@@ -3826,28 -3300,18 +3828,28 @@@ static struct brcmf_cfg80211_event_q *b
static s32 brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event, - const struct brcmf_event_msg *msg) + const struct brcmf_event_msg *msg, void *data) { struct brcmf_cfg80211_event_q *e; s32 err = 0; ulong flags; + u32 data_len; + u32 total_len;
- e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC); + total_len = sizeof(struct brcmf_cfg80211_event_q); + if (data) + data_len = be32_to_cpu(msg->datalen); + else + data_len = 0; + total_len += data_len; + e = kzalloc(total_len, GFP_ATOMIC); if (!e) return -ENOMEM;
e->etype = event; memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg)); + if (data) + memcpy(&e->edata, data, data_len);
spin_lock_irqsave(&cfg_priv->evt_q_lock, flags); list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list); @@@ -3913,17 -3377,8 +3915,17 @@@ static s32 wl_init_priv(struct brcmf_cf
cfg_priv->scan_request = NULL; cfg_priv->pwr_save = true; +#ifdef CONFIG_BRCMISCAN cfg_priv->iscan_on = true; /* iscan on & off switch. we enable iscan by default */ + cfg_priv->escan_on = false; /* escan on & off switch. + we disable escan by default */ +#else + cfg_priv->iscan_on = false; /* iscan on & off switch. + we disable iscan by default */ + cfg_priv->escan_on = true; /* escan on & off switch. + we enable escan by default */ +#endif cfg_priv->roam_on = true; /* roam on & off switch. we enable roam by default */
@@@ -3941,7 -3396,6 +3943,7 @@@ err = brcmf_init_iscan(cfg_priv); if (err) return err; + brcmf_init_escan(cfg_priv); brcmf_init_conf(cfg_priv->conf); brcmf_init_prof(cfg_priv->profile); brcmf_link_down(cfg_priv); @@@ -4026,7 -3480,7 +4028,7 @@@ brcmf_cfg80211_event(struct net_device u32 event_type = be32_to_cpu(e->event_type); struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
- if (!brcmf_enq_event(cfg_priv, event_type, e)) + if (!brcmf_enq_event(cfg_priv, event_type, e, data)) schedule_work(&cfg_priv->event_work); }
@@@ -4100,7 -3554,6 +4102,7 @@@ static s32 brcmf_dongle_eventmsg(struc setbit(eventmask, BRCMF_E_TXFAIL); setbit(eventmask, BRCMF_E_JOIN_START); setbit(eventmask, BRCMF_E_SCAN_COMPLETE); + setbit(eventmask, BRCMF_E_ESCAN_RESULT);
brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); diff --combined drivers/net/wireless/iwlwifi/pcie/trans.c index 8488511,dbeebef..76aa503 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@@ -492,11 -492,10 +492,11 @@@ static void iwl_tx_queue_free(struct iw iwl_tx_queue_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) - for (i = 0; i < txq->q.n_window; i++) + for (i = 0; i < txq->q.n_window; i++) { kfree(txq->entries[i].cmd); + kfree(txq->entries[i].copy_cmd); + }
/* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) { @@@ -897,7 -896,6 +897,7 @@@ static int iwl_set_hw_ready(struct iwl_ static int iwl_prepare_card_hw(struct iwl_trans *trans) { int ret; + int t = 0;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@@ -910,15 -908,17 +910,15 @@@ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + do { + ret = iwl_set_hw_ready(trans); + if (ret >= 0) + return 0;
- if (ret < 0) - return ret; + usleep_range(200, 1000); + t += 200; + } while (t < 150000);
- /* HW should be ready by now, check again. */ - ret = iwl_set_hw_ready(trans); - if (ret >= 0) - return 0; return ret; }
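The reworked iwl_prepare_card_hw() above replaces one long register poll with a bounded retry loop: probe readiness, sleep 200-1000 us between attempts, and give up after roughly 150 ms. The shape of that loop in standalone form (the readiness probe here is a stand-in that succeeds on the third try):

#include <stdio.h>
#include <unistd.h>

static int attempts;

/* stand-in for iwl_set_hw_ready(): >= 0 means the NIC answered */
static int hw_ready(void)
{
	return ++attempts >= 3 ? 0 : -1;
}

int main(void)
{
	int ret = -1, t = 0;

	do {
		ret = hw_ready();
		if (ret >= 0)
			break;
		usleep(200);		/* the kernel uses usleep_range(200, 1000) */
		t += 200;
	} while (t < 150000);		/* total budget: ~150 ms */

	printf("ready=%d after %d us and %d attempts\n", ret == 0, t, attempts);
	return 0;
}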
@@@ -1442,6 -1442,7 +1442,7 @@@ static int iwl_trans_pcie_start_hw(stru return err;
err_free_irq: + trans_pcie->irq_requested = false; free_irq(trans_pcie->irq, trans); error: iwl_free_isr_ict(trans); @@@ -1771,7 -1772,7 +1772,7 @@@ void iwl_dump_csr(struct iwl_trans *tra #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ if (!debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops)) \ - return -ENOMEM; \ + goto err; \ } while (0)
/* file operation */ @@@ -2035,10 -2036,6 +2036,10 @@@ static int iwl_trans_pcie_dbgfs_registe DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); return 0; + +err: + IWL_ERR(trans, "failed to create the trans debugfs entry\n"); + return -ENOMEM; } #else static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, diff --combined drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index cc89582,dd4bb09..86d73b3 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@@ -994,8 -994,16 +994,16 @@@ static enum version_8192c _rtl92ce_read version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C : VERSION_A_CHIP_88C; } else { - version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C : - VERSION_B_CHIP_88C; + version = (enum version_8192c) (CHIP_VER_B | + ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) | + ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); + if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 & + CHIP_VER_RTL_MASK)) { + version = (enum version_8192c)(version | + ((((value32 & CHIP_VER_RTL_MASK) == BIT(12)) + ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) | + CHIP_VENDOR_UMC)); + } }
switch (version) { @@@ -1906,8 -1914,8 +1914,8 @@@ static void rtl92ce_update_hal_rate_mas } RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", ratr_bitmap); - *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | - (ratr_index << 28)); + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", diff --combined drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 60451ee,7d8f964..ea2e1bd --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@@ -162,10 -162,12 +162,12 @@@ int rtl92c_init_sw_vars(struct ieee8021
/* request fw */ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && - !IS_92C_SERIAL(rtlhal->version)) + !IS_92C_SERIAL(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; + pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n"); + }
rtlpriv->max_fw_size = 0x4000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); @@@ -342,7 -344,7 +344,7 @@@ static struct rtl_hal_cfg rtl92ce_hal_c .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, };
-DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { +static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)}, diff --combined include/linux/sched.h index a8e2413,23bddac..439d35a --- a/include/linux/sched.h +++ b/include/linux/sched.h @@@ -954,7 -954,6 +954,6 @@@ struct sched_domain unsigned int smt_gain; int flags; /* See SD_* */ int level; - int idle_buddy; /* cpu assigned to select_idle_sibling() */
/* Runtime fields. */ unsigned long last_balance; /* init to jiffies. units in jiffies */ @@@ -1530,9 -1529,6 +1529,9 @@@ struct task_struct * cache last used pipe for splice */ struct pipe_inode_info *splice_pipe; + + struct page_frag task_frag; + #ifdef CONFIG_TASK_DELAY_ACCT struct task_delay_info *delays; #endif diff --combined include/net/ip6_fib.h index cd64cf3,9fc7114..8a2a203 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@@ -37,7 -37,6 +37,7 @@@ struct fib6_config int fc_ifindex; u32 fc_flags; u32 fc_protocol; + u32 fc_type; /* only 8 bits are used */
struct in6_addr fc_dst; struct in6_addr fc_src; @@@ -112,9 -111,8 +112,8 @@@ struct rt6_info struct inet6_dev *rt6i_idev; unsigned long _rt6i_peer;
- #ifdef CONFIG_XFRM - u32 rt6i_flow_cache_genid; - #endif + u32 rt6i_genid; + /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len;
diff --combined include/net/net_namespace.h index d61e2b3,fd87963..4faf661 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@@ -15,7 -15,6 +15,7 @@@ #include <net/netns/packet.h> #include <net/netns/ipv4.h> #include <net/netns/ipv6.h> +#include <net/netns/sctp.h> #include <net/netns/dccp.h> #include <net/netns/x_tables.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@@ -67,7 -66,6 +67,7 @@@ struct net struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; /* protected by rtnl_mutex */ + int ifindex;
/* core fib_rules */ struct list_head rules_ops; @@@ -82,9 -80,6 +82,9 @@@ #if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; #endif +#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE) + struct netns_sctp sctp; +#endif #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) struct netns_dccp dccp; #endif @@@ -93,9 -88,6 +93,9 @@@ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct netns_ct ct; #endif +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) + struct netns_nf_frag nf_frag; +#endif struct sock *nfnl; struct sock *nfnl_stash; #endif @@@ -110,15 -102,9 +110,16 @@@ #endif struct netns_ipvs *ipvs; struct sock *diag_nlsk; + atomic_t rt_genid; };
+/* + * ifindex generation is per-net namespace, and loopback is + * always the 1st device in ns (see net_dev_init), thus any + * loopback device should get ifindex 1 + */ + +#define LOOPBACK_IFINDEX 1
#include <linux/seq_file_net.h>
@@@ -315,5 -301,14 +316,14 @@@ static inline void unregister_net_sysct } #endif
+ static inline int rt_genid(struct net *net) + { + return atomic_read(&net->rt_genid); + } + + static inline void rt_genid_bump(struct net *net) + { + atomic_inc(&net->rt_genid); + }
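rt_genid()/rt_genid_bump() above implement generation-count invalidation: every cached route records the namespace's generation number when it is created, a reader compares that against the current counter, and a single atomic increment invalidates the whole cache in O(1). The scheme in miniature (the cache struct is illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int net_rt_genid;	/* per-namespace in the kernel */

struct cached_route {
	int genid;	/* generation sampled when the entry was created */
	/* ... route data ... */
};

static bool route_is_valid(const struct cached_route *rt)
{
	return rt->genid == atomic_load(&net_rt_genid);
}

int main(void)
{
	struct cached_route rt = { .genid = atomic_load(&net_rt_genid) };

	printf("%d\n", route_is_valid(&rt));	/* 1 */
	atomic_fetch_add(&net_rt_genid, 1);	/* rt_genid_bump(): flush all */
	printf("%d\n", route_is_valid(&rt));	/* 0 */
	return 0;
}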
#endif /* __NET_NET_NAMESPACE_H */ diff --combined include/net/netns/ipv4.h index 7d00583,eb24dbc..2ae2b83 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@@ -5,7 -5,6 +5,7 @@@ #ifndef __NETNS_IPV4_H__ #define __NETNS_IPV4_H__
+#include <linux/uidgid.h> #include <net/inet_frag.h>
struct tcpm_hash_bucket; @@@ -52,6 -51,8 +52,6 @@@ struct netns_ipv4 struct xt_table *iptable_security; #endif struct xt_table *nat_table; - struct hlist_head *nat_bysource; - unsigned int nat_htable_size; #endif
int sysctl_icmp_echo_ignore_all; @@@ -61,10 -62,9 +61,9 @@@ int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr;
- unsigned int sysctl_ping_group_range[2]; + kgid_t sysctl_ping_group_range[2]; long sysctl_tcp_mem[3];
- atomic_t rt_genid; atomic_t dev_addr_genid;
#ifdef CONFIG_IP_MROUTE diff --combined include/net/sock.h index bc476a1,adb7da2..c917556 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -247,7 -247,8 +247,7 @@@ struct cg_proto * @sk_stamp: time stamp of last packet received * @sk_socket: Identd and reporting IO signals * @sk_user_data: RPC layer private data - * @sk_sndmsg_page: cached page for sendmsg - * @sk_sndmsg_off: cached offset for sendmsg + * @sk_frag: cached page frag * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules @@@ -361,8 -362,9 +361,8 @@@ struct sock ktime_t sk_stamp; struct socket *sk_socket; void *sk_user_data; - struct page *sk_sndmsg_page; + struct page_frag sk_frag; struct sk_buff *sk_send_head; - __u32 sk_sndmsg_off; __s32 sk_peek_off; int sk_write_pending; #ifdef CONFIG_SECURITY @@@ -604,15 -606,6 +604,15 @@@ static inline void sk_add_bind_node(str #define sk_for_each_bound(__sk, node, list) \ hlist_for_each_entry(__sk, node, list, sk_bind_node)
+static inline struct user_namespace *sk_user_ns(struct sock *sk) +{ + /* Careful: only use this in a context where these parameters + * cannot change and must all be valid, such as recvmsg from + * userspace. + */ + return sk->sk_socket->file->f_cred->user_ns; +} + /* Sock flags */ enum sock_flags { SOCK_DEAD, @@@ -1339,7 -1332,7 +1339,7 @@@ static inline bool sk_wmem_schedule(str }
static inline bool - sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) + sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { if (!sk_has_account(sk)) return true; @@@ -1677,7 -1670,7 +1677,7 @@@ static inline void sock_graft(struct so write_unlock_bh(&sk->sk_callback_lock); }
-extern int sock_i_uid(struct sock *sk); +extern kuid_t sock_i_uid(struct sock *sk); extern unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry * @@@ -2032,23 -2025,18 +2032,23 @@@ static inline void sk_stream_moderate_s
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
-static inline struct page *sk_stream_alloc_page(struct sock *sk) +/** + * sk_page_frag - return an appropriate page_frag + * @sk: socket + * + * If socket allocation mode allows current thread to sleep, it means it's + * safe to use the per-task page_frag instead of the per-socket one. + */ +static inline struct page_frag *sk_page_frag(struct sock *sk) { - struct page *page = NULL; + if (sk->sk_allocation & __GFP_WAIT) + return &current->task_frag;
- page = alloc_pages(sk->sk_allocation, 0); - if (!page) { - sk_enter_memory_pressure(sk); - sk_stream_moderate_sndbuf(sk); - } - return page; + return &sk->sk_frag; }
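sk_page_frag() above picks the per-task scratch frag when the socket's allocation mode allows sleeping (a sleeping context cannot race with itself, so one page then serves all of the task's sockets) and falls back to the per-socket frag otherwise. The selection logic, reduced to stand-in types and flags:

#include <stdio.h>

#define GFP_WAIT 0x10u	/* stand-in for __GFP_WAIT */

struct page_frag { void *page; unsigned int offset; };

static struct page_frag task_frag;	/* stand-in for current->task_frag */

struct sock_lite {
	unsigned int sk_allocation;
	struct page_frag sk_frag;
};

static struct page_frag *pick_page_frag(struct sock_lite *sk)
{
	if (sk->sk_allocation & GFP_WAIT)
		return &task_frag;	/* may sleep: per-task frag is safe */
	return &sk->sk_frag;		/* atomic context: per-socket frag */
}

int main(void)
{
	struct sock_lite sk = { .sk_allocation = GFP_WAIT };

	printf("%s\n", pick_page_frag(&sk) == &task_frag ? "task" : "socket");
	return 0;
}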
+extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); + /* * Default write policy as shown to user space via poll/select/SIGIO */ @@@ -2229,6 -2217,8 +2229,6 @@@ extern int net_msg_warn extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max;
-extern void sk_init(void); - extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default; diff --combined kernel/pid_namespace.c index baa528d,6144bab..478bad2 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@@ -16,7 -16,6 +16,7 @@@ #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/reboot.h> +#include <linux/export.h>
#define BITS_PER_PAGE (PAGE_SIZE*8)
@@@ -145,7 -144,6 +145,7 @@@ void free_pid_ns(struct kref *kref if (parent != NULL) put_pid_ns(parent); } +EXPORT_SYMBOL_GPL(free_pid_ns);
void zap_pid_ns_processes(struct pid_namespace *pid_ns) { @@@ -234,15 -232,19 +234,19 @@@ static int pid_ns_ctl_handler(struct ct */
tmp.data = &current->nsproxy->pid_ns->last_pid; - return proc_dointvec(&tmp, write, buffer, lenp, ppos); + return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); }
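Switching the ns_last_pid handler above from proc_dointvec() to proc_dointvec_minmax() with extra1/extra2 makes the kernel reject, rather than store, writes outside [0, pid_max]. The enforced rule, as a plain check:

#include <stdio.h>

static int pid_max = 32768;	/* kernel default, for this sketch */

/* mirrors what proc_dointvec_minmax() now enforces for ns_last_pid */
static int set_ns_last_pid(int *last_pid, int val)
{
	if (val < 0 || val > pid_max)
		return -1;	/* the kernel returns -EINVAL */
	*last_pid = val;
	return 0;
}

int main(void)
{
	int last_pid = 0;

	printf("%d\n", set_ns_last_pid(&last_pid, 300));	/* 0: accepted */
	printf("%d\n", set_ns_last_pid(&last_pid, -5));		/* -1: rejected */
	return 0;
}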
+ extern int pid_max; + static int zero = 0; static struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), .mode = 0666, /* permissions are checked in the handler */ .proc_handler = pid_ns_ctl_handler, + .extra1 = &zero, + .extra2 = &pid_max, }, { } }; diff --combined net/batman-adv/bat_iv_ogm.c index df79300,469daab..b02b75d --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -166,15 -166,13 +166,15 @@@ static void batadv_iv_ogm_send_to_if(st int16_t buff_pos; struct batadv_ogm_packet *batadv_ogm_packet; struct sk_buff *skb; + uint8_t *packet_pos;
if (hard_iface->if_status != BATADV_IF_ACTIVE) return;
packet_num = 0; buff_pos = 0; - batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; + packet_pos = forw_packet->skb->data; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
/* adjust all flags and log packets */ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, @@@ -183,17 -181,15 +183,17 @@@ /* we might have aggregated direct link packets with an * ordinary base packet */ - if ((forw_packet->direct_link_flags & (1 << packet_num)) && - (forw_packet->if_incoming == hard_iface)) + if (forw_packet->direct_link_flags & BIT(packet_num) && + forw_packet->if_incoming == hard_iface) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
- fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? - "Sending own" : - "Forwarding")); + if (packet_num > 0 || !forw_packet->own) + fwd_str = "Forwarding"; + else + fwd_str = "Sending own"; + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? "aggregated " : ""), @@@ -208,8 -204,8 +208,8 @@@ buff_pos += BATADV_OGM_HLEN; buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes); packet_num++; - batadv_ogm_packet = (struct batadv_ogm_packet *) - (forw_packet->skb->data + buff_pos); + packet_pos = forw_packet->skb->data + buff_pos; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; }
/* create clone because function is called more than once */ @@@ -231,10 -227,9 +231,10 @@@ static void batadv_iv_ogm_emit(struct b struct batadv_hard_iface *primary_if = NULL; struct batadv_ogm_packet *batadv_ogm_packet; unsigned char directlink; + uint8_t *packet_pos;
- batadv_ogm_packet = (struct batadv_ogm_packet *) - (forw_packet->skb->data); + packet_pos = forw_packet->skb->data; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) { @@@ -459,7 -454,6 +459,7 @@@ static void batadv_iv_ogm_aggregate(str int packet_len, bool direct_link) { unsigned char *skb_buff; + unsigned long new_direct_link_flag;
skb_buff = skb_put(forw_packet_aggr->skb, packet_len); memcpy(skb_buff, packet_buff, packet_len); @@@ -467,10 -461,9 +467,10 @@@ forw_packet_aggr->num_packets++;
/* save packet direct link flag status */ - if (direct_link) - forw_packet_aggr->direct_link_flags |= - (1 << forw_packet_aggr->num_packets); + if (direct_link) { + new_direct_link_flag = BIT(forw_packet_aggr->num_packets); + forw_packet_aggr->direct_link_flags |= new_direct_link_flag; + } }
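The aggregation hunk above stages the flag in an unsigned long and switches from an open-coded 1 << n to BIT(n), which expands to 1UL << n in linux/bitops.h, so the shift is done in unsigned long width rather than int. The flag bookkeeping, standalone:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))	/* as in linux/bitops.h */

int main(void)
{
	unsigned long direct_link_flags = 0;
	unsigned int packet_num;

	/* mark packets 0 and 3 as direct-link, as the aggregator does */
	for (packet_num = 0; packet_num < 4; packet_num++)
		if (packet_num == 0 || packet_num == 3)
			direct_link_flags |= BIT(packet_num);

	/* the send path tests the same bits per packet */
	printf("%d %d\n",
	       !!(direct_link_flags & BIT(0)),	/* 1 */
	       !!(direct_link_flags & BIT(1)));	/* 0 */
	return 0;
}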
static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, @@@ -593,8 -586,6 +593,8 @@@ static void batadv_iv_ogm_schedule(stru struct batadv_ogm_packet *batadv_ogm_packet; struct batadv_hard_iface *primary_if; int vis_server, tt_num_changes = 0; + uint32_t seqno; + uint8_t bandwidth;
vis_server = atomic_read(&bat_priv->vis_mode); primary_if = batadv_primary_if_get_selected(bat_priv); @@@ -608,12 -599,12 +608,12 @@@ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
/* change sequence number to network order */ - batadv_ogm_packet->seqno = - htonl((uint32_t)atomic_read(&hard_iface->seqno)); + seqno = (uint32_t)atomic_read(&hard_iface->seqno); + batadv_ogm_packet->seqno = htonl(seqno); atomic_inc(&hard_iface->seqno);
- batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); - batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc); + batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn); + batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc); if (tt_num_changes >= 0) batadv_ogm_packet->tt_num_changes = tt_num_changes;
@@@ -622,13 -613,12 +622,13 @@@ else batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
- if ((hard_iface == primary_if) && - (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER)) - batadv_ogm_packet->gw_flags = - (uint8_t)atomic_read(&bat_priv->gw_bandwidth); - else + if (hard_iface == primary_if && + atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) { + bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth); + batadv_ogm_packet->gw_flags = bandwidth; + } else { batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS; + }
batadv_slide_own_bcast_window(hard_iface); batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, @@@ -652,9 -642,9 +652,10 @@@ batadv_iv_ogm_orig_update(struct batadv struct batadv_neigh_node *router = NULL; struct batadv_orig_node *orig_node_tmp; struct hlist_node *node; + int if_num; uint8_t sum_orig, sum_neigh; uint8_t *neigh_addr; + uint8_t tq_avg;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "update_originator(): Searching and updating originator entry of received packet\n"); @@@ -678,8 -668,8 +679,8 @@@ spin_lock_bh(&tmp_neigh_node->lq_update_lock); batadv_ring_buffer_set(tmp_neigh_node->tq_recv, &tmp_neigh_node->tq_index, 0); - tmp_neigh_node->tq_avg = - batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); + tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); + tmp_neigh_node->tq_avg = tq_avg; spin_unlock_bh(&tmp_neigh_node->lq_update_lock); }
@@@ -738,12 -728,14 +739,14 @@@ if (router && (neigh_node->tq_avg == router->tq_avg)) { orig_node_tmp = router->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); - sum_orig = orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + if_num = router->if_incoming->if_num; + sum_orig = orig_node_tmp->bcast_own_sum[if_num]; spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
orig_node_tmp = neigh_node->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); - sum_neigh = orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + if_num = neigh_node->if_incoming->if_num; + sum_neigh = orig_node_tmp->bcast_own_sum[if_num]; spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
if (sum_orig >= sum_neigh) @@@ -844,10 -836,8 +847,10 @@@ static int batadv_iv_ogm_calc_tq(struc spin_unlock_bh(&orig_node->ogm_cnt_lock);
/* take care not to get a value bigger than 100 % */ - total_count = (orig_eq_count > neigh_rq_count ? - neigh_rq_count : orig_eq_count); + if (orig_eq_count > neigh_rq_count) + total_count = neigh_rq_count; + else + total_count = orig_eq_count;
/* if we have too few packets (too little data) we set tq_own to zero * if we receive too few packets it is not considered bidirectional @@@ -921,7 -911,6 +924,7 @@@ batadv_iv_ogm_update_seqnos(const struc int set_mark, ret = -1; uint32_t seqno = ntohl(batadv_ogm_packet->seqno); uint8_t *neigh_addr; + uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); if (!orig_node) @@@ -956,9 -945,9 +959,9 @@@ tmp_neigh_node->real_bits, seq_diff, set_mark);
- tmp_neigh_node->real_packet_count = - bitmap_weight(tmp_neigh_node->real_bits, - BATADV_TQ_LOCAL_WINDOW_SIZE); + packet_count = bitmap_weight(tmp_neigh_node->real_bits, + BATADV_TQ_LOCAL_WINDOW_SIZE); + tmp_neigh_node->real_packet_count = packet_count; } rcu_read_unlock();
@@@ -1175,12 -1164,9 +1178,12 @@@ static void batadv_iv_ogm_process(cons /* if sender is a direct neighbor the sender mac equals * originator mac */ - orig_neigh_node = (is_single_hop_neigh ? - orig_node : - batadv_get_orig_node(bat_priv, ethhdr->h_source)); + if (is_single_hop_neigh) + orig_neigh_node = orig_node; + else + orig_neigh_node = batadv_get_orig_node(bat_priv, + ethhdr->h_source); + if (!orig_neigh_node) goto out;
@@@ -1266,7 -1252,6 +1269,7 @@@ static int batadv_iv_ogm_receive(struc int buff_pos = 0, packet_len; unsigned char *tt_buff, *packet_buff; bool ret; + uint8_t *packet_pos;
ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN); if (!ret) @@@ -1297,8 -1282,8 +1300,8 @@@ buff_pos += BATADV_OGM_HLEN; buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
- batadv_ogm_packet = (struct batadv_ogm_packet *) - (packet_buff + buff_pos); + packet_pos = packet_buff + buff_pos; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, batadv_ogm_packet->tt_num_changes));
diff --combined net/batman-adv/soft-interface.c index 7b683e0,21c5357..b9a28d2 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -93,32 -93,28 +93,35 @@@ static int batadv_interface_release(str static struct net_device_stats *batadv_interface_stats(struct net_device *dev) { struct batadv_priv *bat_priv = netdev_priv(dev); - return &bat_priv->stats; + struct net_device_stats *stats = &bat_priv->stats; + + stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX); + stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES); + stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED); + stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX); + stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES); + return stats; }
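batadv_interface_stats() now derives the net_device_stats fields from per-CPU counters instead of a single shared struct. The sketch below shows the underlying pattern under simplified assumptions: writers bump a slot private to their CPU with no locking, and the read side sums all slots. The fixed NR_CPUS array and counter names are illustrative, not the kernel's percpu API.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

enum { CNT_TX, CNT_TX_BYTES, CNT_MAX };

static uint64_t counters[NR_CPUS][CNT_MAX];

static void add_counter(int cpu, int idx, uint64_t val)
{
    counters[cpu][idx] += val;   /* lock-free: the slot is cpu-private */
}

static uint64_t sum_counter(int idx)
{
    uint64_t sum = 0;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        sum += counters[cpu][idx];   /* read side pays the summation cost */
    return sum;
}

int main(void)
{
    add_counter(0, CNT_TX, 1);
    add_counter(2, CNT_TX, 1);
    add_counter(2, CNT_TX_BYTES, 1500);
    printf("tx=%llu tx_bytes=%llu\n",
           (unsigned long long)sum_counter(CNT_TX),
           (unsigned long long)sum_counter(CNT_TX_BYTES));
    return 0;
}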
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) { struct batadv_priv *bat_priv = netdev_priv(dev); struct sockaddr *addr = p; + uint8_t old_addr[ETH_ALEN];
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
+ memcpy(old_addr, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + /* only modify transtable if it has been initialized before */ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { - batadv_tt_local_remove(bat_priv, dev->dev_addr, + batadv_tt_local_remove(bat_priv, old_addr, "mac address changed", false); batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); }
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); dev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } @@@ -149,7 -145,6 +152,7 @@@ static int batadv_interface_tx(struct s int data_len = skb->len, ret; short vid __maybe_unused = -1; bool do_bcast = false; + uint32_t seqno;
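The set_mac_addr hunk above fixes an ordering bug: the old address must be saved before dev->dev_addr is overwritten, otherwise the translation-table removal runs against the new address. A small sketch of the save-then-update pattern, with stub table operations standing in for batadv_tt_local_remove()/batadv_tt_local_add():

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void tt_remove(const unsigned char *a) { printf("remove %02x:...\n", a[0]); }
static void tt_add(const unsigned char *a)    { printf("add %02x:...\n", a[0]); }

static void set_mac(unsigned char *dev_addr, const unsigned char *new_addr)
{
    unsigned char old_addr[ETH_ALEN];

    memcpy(old_addr, dev_addr, ETH_ALEN);   /* save before overwriting */
    memcpy(dev_addr, new_addr, ETH_ALEN);

    tt_remove(old_addr);                    /* sees the address being replaced */
    tt_add(new_addr);
}

int main(void)
{
    unsigned char dev_addr[ETH_ALEN] = { 0xaa, 0, 0, 0, 0, 1 };
    unsigned char new_addr[ETH_ALEN] = { 0xbb, 0, 0, 0, 0, 2 };

    set_mac(dev_addr, new_addr);
    return 0;
}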
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) goto dropped; @@@ -231,8 -226,8 +234,8 @@@ primary_if->net_dev->dev_addr, ETH_ALEN);
/* set broadcast sequence number */ - bcast_packet->seqno = - htonl(atomic_inc_return(&bat_priv->bcast_seqno)); + seqno = atomic_inc_return(&bat_priv->bcast_seqno); + bcast_packet->seqno = htonl(seqno);
batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
@@@ -254,14 -249,14 +257,14 @@@ goto dropped_freed; }
- bat_priv->stats.tx_packets++; - bat_priv->stats.tx_bytes += data_len; + batadv_inc_counter(bat_priv, BATADV_CNT_TX); + batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len); goto end;
dropped: kfree_skb(skb); dropped_freed: - bat_priv->stats.tx_dropped++; + batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); end: if (primary_if) batadv_hardif_free_ref(primary_if); @@@ -270,7 -265,7 +273,7 @@@
void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct batadv_hard_iface *recv_if, - int hdr_size) + int hdr_size, struct batadv_orig_node *orig_node) { struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct ethhdr *ethhdr; @@@ -316,16 -311,11 +319,16 @@@
/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
- bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; + batadv_inc_counter(bat_priv, BATADV_CNT_RX); + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, + skb->len + ETH_HLEN);
soft_iface->last_rx = jiffies;
+ if (orig_node) + batadv_tt_add_temporary_global_entry(bat_priv, orig_node, + ethhdr->h_source); + if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped;
@@@ -392,22 -382,15 +395,22 @@@ struct net_device *batadv_softif_create if (!soft_iface) goto out;
+ bat_priv = netdev_priv(soft_iface); + + /* batadv_interface_stats() needs to be available as soon as + * register_netdevice() has been called + */ + bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); + if (!bat_priv->bat_counters) + goto free_soft_iface; + ret = register_netdevice(soft_iface); if (ret < 0) { pr_err("Unable to register the batman interface '%s': %i\n", name, ret); - goto free_soft_iface; + goto free_bat_counters; }
- bat_priv = netdev_priv(soft_iface); - atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); atomic_set(&bat_priv->bridge_loop_avoidance, 0); @@@ -425,26 -408,29 +428,26 @@@
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); atomic_set(&bat_priv->bcast_seqno, 1); - atomic_set(&bat_priv->ttvn, 0); - atomic_set(&bat_priv->tt_local_changes, 0); - atomic_set(&bat_priv->tt_ogm_append_cnt, 0); - atomic_set(&bat_priv->bla_num_requests, 0); - - bat_priv->tt_buff = NULL; - bat_priv->tt_buff_len = 0; - bat_priv->tt_poss_change = false; + atomic_set(&bat_priv->tt.vn, 0); + atomic_set(&bat_priv->tt.local_changes, 0); + atomic_set(&bat_priv->tt.ogm_append_cnt, 0); +#ifdef CONFIG_BATMAN_ADV_BLA + atomic_set(&bat_priv->bla.num_requests, 0); +#endif + bat_priv->tt.last_changeset = NULL; + bat_priv->tt.last_changeset_len = 0; + bat_priv->tt.poss_change = false;
bat_priv->primary_if = NULL; bat_priv->num_ifaces = 0;
- bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); - if (!bat_priv->bat_counters) - goto unreg_soft_iface; - ret = batadv_algo_select(bat_priv, batadv_routing_algo); if (ret < 0) - goto free_bat_counters; + goto unreg_soft_iface;
ret = batadv_sysfs_add_meshif(soft_iface); if (ret < 0) - goto free_bat_counters; + goto unreg_soft_iface;
ret = batadv_debugfs_add_meshif(soft_iface); if (ret < 0) @@@ -460,13 -446,12 +463,13 @@@ unreg_debugfs batadv_debugfs_del_meshif(soft_iface); unreg_sysfs: batadv_sysfs_del_meshif(soft_iface); -free_bat_counters: - free_percpu(bat_priv->bat_counters); unreg_soft_iface: + free_percpu(bat_priv->bat_counters); unregister_netdevice(soft_iface); return NULL;
+free_bat_counters: + free_percpu(bat_priv->bat_counters); free_soft_iface: free_netdev(soft_iface); out: @@@ -536,11 -521,6 +539,11 @@@ static u32 batadv_get_link(struct net_d static const struct { const char name[ETH_GSTRING_LEN]; } batadv_counters_strings[] = { + { "tx" }, + { "tx_bytes" }, + { "tx_dropped" }, + { "rx" }, + { "rx_bytes" }, { "forward" }, { "forward_bytes" }, { "mgmt_tx" }, diff --combined net/bluetooth/bnep/sock.c index 5b6cc0b,1eaacf1..e7154a5 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@@ -29,10 -29,6 +29,10 @@@
#include "bnep.h"
+static struct bt_sock_list bnep_sk_list = { + .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock) +}; + static int bnep_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@@ -42,8 -38,6 +42,8 @@@ if (!sk) return 0;
+ bt_sock_unlink(&bnep_sk_list, sk); + sock_orphan(sk); sock_put(sk); return 0; @@@ -64,7 -58,7 +64,7 @@@ static int bnep_sock_ioctl(struct socke switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@@ -90,7 -84,7 +90,7 @@@
case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; @@@ -210,7 -204,6 +210,7 @@@ static int bnep_sock_create(struct net sk->sk_protocol = protocol; sk->sk_state = BT_OPEN;
+ bt_sock_link(&bnep_sk_list, sk); return 0; }
@@@ -229,30 -222,19 +229,30 @@@ int __init bnep_sock_init(void return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("Can't register BNEP socket"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create BNEP proc file"); + bt_sock_unregister(BTPROTO_BNEP); + goto error; + } + + BT_INFO("BNEP socket layer initialized");
return 0;
error: - BT_ERR("Can't register BNEP socket"); proto_unregister(&bnep_proto); return err; }
void __exit bnep_sock_cleanup(void) { + bt_procfs_cleanup(&init_net, "bnep"); if (bt_sock_unregister(BTPROTO_BNEP) < 0) BT_ERR("Can't unregister BNEP socket");
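The bnep_sock_init() change above follows the register-then-unwind idiom shared by all of these Bluetooth socket hunks: each successfully acquired resource gains a matching release on the error path, in reverse order of acquisition. A compressed sketch of the idiom with stubbed registration calls (the stub names are invented for illustration):

#include <stdio.h>

static int proto_register_stub(void)    { return 0; }
static void proto_unregister_stub(void) { puts("proto unregistered"); }
static int sock_register_stub(void)     { return 0; }
static void sock_unregister_stub(void)  { puts("sock unregistered"); }
static int procfs_init_stub(void)       { return -1; }  /* pretend this fails */

static int init_sockets(void)
{
    int err = proto_register_stub();

    if (err < 0)
        return err;

    err = sock_register_stub();
    if (err < 0)
        goto unreg_proto;

    err = procfs_init_stub();
    if (err < 0)
        goto unreg_sock;                 /* undo in reverse order */

    return 0;

unreg_sock:
    sock_unregister_stub();
unreg_proto:
    proto_unregister_stub();
    return err;
}

int main(void)
{
    printf("init: %d\n", init_sockets());
    return 0;
}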
diff --combined net/bluetooth/cmtp/sock.c index d5cacef,32dc83d..aacb802 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c @@@ -42,10 -42,6 +42,10 @@@
#include "cmtp.h"
+static struct bt_sock_list cmtp_sk_list = { + .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock) +}; + static int cmtp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@@ -55,8 -51,6 +55,8 @@@ if (!sk) return 0;
+ bt_sock_unlink(&cmtp_sk_list, sk); + sock_orphan(sk); sock_put(sk);
@@@ -78,7 -72,7 +78,7 @@@ static int cmtp_sock_ioctl(struct socke switch (cmd) { case CMTPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@@ -103,7 -97,7 +103,7 @@@
case CMTPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; @@@ -220,8 -214,6 +220,8 @@@ static int cmtp_sock_create(struct net sk->sk_protocol = protocol; sk->sk_state = BT_OPEN;
+ bt_sock_link(&cmtp_sk_list, sk); + return 0; }
@@@ -240,30 -232,19 +240,30 @@@ int cmtp_init_sockets(void return err;
err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("Can't register CMTP socket"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create CMTP proc file"); + bt_sock_unregister(BTPROTO_CMTP); + goto error; + } + + BT_INFO("CMTP socket layer initialized");
return 0;
error: - BT_ERR("Can't register CMTP socket"); proto_unregister(&cmtp_proto); return err; }
void cmtp_cleanup_sockets(void) { + bt_procfs_cleanup(&init_net, "cmtp"); if (bt_sock_unregister(BTPROTO_CMTP) < 0) BT_ERR("Can't unregister CMTP socket");
diff --combined net/bluetooth/hci_core.c index fa974a1,0b997c8..8806869 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@@ -696,8 -696,7 +696,8 @@@ int hci_dev_open(__u16 dev hci_dev_hold(hdev); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); - if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { + if (!test_bit(HCI_SETUP, &hdev->dev_flags) && + mgmt_valid_hdev(hdev)) { hci_dev_lock(hdev); mgmt_powered(hdev, 1); hci_dev_unlock(hdev); @@@ -735,6 -734,8 +735,8 @@@ static int hci_dev_do_close(struct hci_
cancel_work_sync(&hdev->le_scan);
+ cancel_delayed_work(&hdev->power_off); + hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev);
@@@ -798,8 -799,7 +800,8 @@@ * and no tasks are scheduled. */ hdev->close(hdev);
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { + if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && + mgmt_valid_hdev(hdev)) { hci_dev_lock(hdev); mgmt_powered(hdev, 0); hci_dev_unlock(hdev); diff --combined net/bluetooth/hci_sock.c index bb64331,d5ace1e..07f0739 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@@ -490,7 -490,7 +490,7 @@@ static int hci_sock_bound_ioctl(struct switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return -EPERM; @@@ -510,12 -510,12 +510,12 @@@
case HCIBLOCKADDR: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_sock_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_sock_blacklist_del(hdev, (void __user *) arg);
default: @@@ -546,22 -546,22 +546,22 @@@ static int hci_sock_ioctl(struct socke
case HCIDEVUP: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_open(arg);
case HCIDEVDOWN: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_close(arg);
case HCIDEVRESET: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_reset(arg);
case HCIDEVRESTAT: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_reset_stat(arg);
case HCISETSCAN: @@@ -573,7 -573,7 +573,7 @@@ case HCISETACLMTU: case HCISETSCOMTU: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_cmd(cmd, argp);
case HCIINQUIRY: @@@ -1102,30 -1102,21 +1102,30 @@@ int __init hci_sock_init(void return err;
err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("HCI socket registration failed"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create HCI proc file"); + bt_sock_unregister(BTPROTO_HCI); + goto error; + }
BT_INFO("HCI socket layer initialized");
return 0;
error: - BT_ERR("HCI socket registration failed"); proto_unregister(&hci_sk_proto); return err; }
void hci_sock_cleanup(void) { + bt_procfs_cleanup(&init_net, "hci"); if (bt_sock_unregister(BTPROTO_HCI) < 0) BT_ERR("HCI socket unregistration failed");
diff --combined net/bluetooth/hidp/sock.c index eca3889,b24fb3b..82a829d --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@@ -25,10 -25,6 +25,10 @@@
#include "hidp.h"
+static struct bt_sock_list hidp_sk_list = { + .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock) +}; + static int hidp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@@ -38,8 -34,6 +38,8 @@@ if (!sk) return 0;
+ bt_sock_unlink(&hidp_sk_list, sk); + sock_orphan(sk); sock_put(sk);
@@@ -62,7 -56,7 +62,7 @@@ static int hidp_sock_ioctl(struct socke switch (cmd) { case HIDPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@@ -97,7 -91,7 +97,7 @@@
case HIDPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; @@@ -259,8 -253,6 +259,8 @@@ static int hidp_sock_create(struct net sk->sk_protocol = protocol; sk->sk_state = BT_OPEN;
+ bt_sock_link(&hidp_sk_list, sk); + return 0; }
@@@ -279,19 -271,8 +279,19 @@@ int __init hidp_init_sockets(void return err;
err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("Can't register HIDP socket"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create HIDP proc file"); + bt_sock_unregister(BTPROTO_HIDP); + goto error; + } + + BT_INFO("HIDP socket layer initialized");
return 0;
@@@ -303,7 -284,6 +303,7 @@@ error
void __exit hidp_cleanup_sockets(void) { + bt_procfs_cleanup(&init_net, "hidp"); if (bt_sock_unregister(BTPROTO_HIDP) < 0) BT_ERR("Can't unregister HIDP socket");
diff --combined net/bluetooth/l2cap_core.c index e0abaf3,38c00f1..bda526e --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@@ -416,30 -416,13 +416,30 @@@ struct l2cap_chan *l2cap_chan_create(vo return chan; }
-void l2cap_chan_destroy(struct l2cap_chan *chan) +static void l2cap_chan_destroy(struct l2cap_chan *chan) { + BT_DBG("chan %p", chan); + write_lock(&chan_list_lock); list_del(&chan->global_l); write_unlock(&chan_list_lock);
- l2cap_chan_put(chan); + kfree(chan); +} + +void l2cap_chan_hold(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt)); + + atomic_inc(&c->refcnt); +} + +void l2cap_chan_put(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt)); + + if (atomic_dec_and_test(&c->refcnt)) + l2cap_chan_destroy(c); }
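The l2cap hunk above makes channel destruction a consequence of dropping the last reference: l2cap_chan_destroy() becomes static and only l2cap_chan_put() may call it, via atomic_dec_and_test(). A userspace sketch of the same hold/put discipline, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
    atomic_int refcnt;
};

static struct chan *chan_create(void)
{
    struct chan *c = malloc(sizeof(*c));

    if (c)
        atomic_init(&c->refcnt, 1);   /* creator holds the first ref */
    return c;
}

static void chan_hold(struct chan *c)
{
    atomic_fetch_add(&c->refcnt, 1);
}

static void chan_put(struct chan *c)
{
    /* fetch_sub returns the old value; 1 means we dropped the last ref */
    if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
        printf("destroying chan %p\n", (void *)c);
        free(c);
    }
}

int main(void)
{
    struct chan *c = chan_create();

    if (!c)
        return 1;
    chan_hold(c);   /* second user */
    chan_put(c);    /* first user done */
    chan_put(c);    /* last ref: destroy */
    return 0;
}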
void l2cap_chan_set_defaults(struct l2cap_chan *chan) @@@ -1025,7 -1008,7 +1025,7 @@@ static void l2cap_send_disconn_req(stru if (!conn) return;
- if (chan->mode == L2CAP_MODE_ERTM) { + if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); @@@ -5348,7 -5331,7 +5348,7 @@@ int l2cap_connect_ind(struct hci_dev *h return exact ? lm1 : lm2; }
-int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) +void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn;
@@@ -5361,6 -5344,7 +5361,6 @@@ } else l2cap_conn_del(hcon, bt_to_errno(status));
- return 0; }
int l2cap_disconn_ind(struct hci_conn *hcon) @@@ -5374,11 -5358,12 +5374,11 @@@ return conn->disc_reason; }
-int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) +void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason)); - return 0; }
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) @@@ -5421,11 -5406,6 +5421,11 @@@ int l2cap_security_cfm(struct hci_conn BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, state_to_string(chan->state));
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { + l2cap_chan_unlock(chan); + continue; + } + if (chan->scid == L2CAP_CID_LE_DATA) { if (!status && encrypt) { chan->sec_level = hcon->sec_level; diff --combined net/bluetooth/mgmt.c index a3329cb,eba022d..e329631 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@@ -193,11 -193,6 +193,11 @@@ static u8 mgmt_status_table[] = MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ };
+bool mgmt_valid_hdev(struct hci_dev *hdev) +{ + return hdev->dev_type == HCI_BREDR; +} + static u8 mgmt_status(u8 hci_status) { if (hci_status < ARRAY_SIZE(mgmt_status_table)) @@@ -322,6 -317,7 +322,6 @@@ static int read_index_list(struct sock u16 data_len) { struct mgmt_rp_read_index_list *rp; - struct list_head *p; struct hci_dev *d; size_t rp_len; u16 count; @@@ -332,10 -328,7 +332,10 @@@ read_lock(&hci_dev_list_lock);
count = 0; - list_for_each(p, &hci_dev_list) { + list_for_each_entry(d, &hci_dev_list, list) { + if (!mgmt_valid_hdev(d)) + continue; + count++; }
@@@ -353,9 -346,6 +353,9 @@@ if (test_bit(HCI_SETUP, &d->dev_flags)) continue;
+ if (!mgmt_valid_hdev(d)) + continue; + rp->index[i++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } @@@ -380,10 -370,10 +380,10 @@@ static u32 get_supported_settings(struc settings |= MGMT_SETTING_DISCOVERABLE; settings |= MGMT_SETTING_PAIRABLE;
- if (hdev->features[6] & LMP_SIMPLE_PAIR) + if (lmp_ssp_capable(hdev)) settings |= MGMT_SETTING_SSP;
- if (!(hdev->features[4] & LMP_NO_BREDR)) { + if (lmp_bredr_capable(hdev)) { settings |= MGMT_SETTING_BREDR; settings |= MGMT_SETTING_LINK_SECURITY; } @@@ -391,7 -381,7 +391,7 @@@ if (enable_hs) settings |= MGMT_SETTING_HS;
- if (hdev->features[4] & LMP_LE) + if (lmp_le_capable(hdev)) settings |= MGMT_SETTING_LE;
return settings; @@@ -413,7 -403,7 +413,7 @@@ static u32 get_current_settings(struct if (test_bit(HCI_PAIRABLE, &hdev->dev_flags)) settings |= MGMT_SETTING_PAIRABLE;
- if (!(hdev->features[4] & LMP_NO_BREDR)) + if (lmp_bredr_capable(hdev)) settings |= MGMT_SETTING_BREDR;
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) @@@ -1121,7 -1111,7 +1121,7 @@@ static int set_ssp(struct sock *sk, str
hci_dev_lock(hdev);
- if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { + if (!lmp_ssp_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_NOT_SUPPORTED); goto failed; @@@ -1205,7 -1195,7 +1205,7 @@@ static int set_le(struct sock *sk, stru
hci_dev_lock(hdev);
- if (!(hdev->features[4] & LMP_LE)) { + if (!lmp_le_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_NOT_SUPPORTED); goto unlock; @@@ -2201,7 -2191,7 +2201,7 @@@ static int read_local_oob_data(struct s goto unlock; }
- if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { + if (!lmp_ssp_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_NOT_SUPPORTED); goto unlock; @@@ -2830,9 -2820,6 +2830,9 @@@ static void cmd_status_rsp(struct pendi
int mgmt_index_added(struct hci_dev *hdev) { + if (!mgmt_valid_hdev(hdev)) + return -ENOTSUPP; + return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); }
@@@ -2840,9 -2827,6 +2840,9 @@@ int mgmt_index_removed(struct hci_dev * { u8 status = MGMT_STATUS_INVALID_INDEX;
+ if (!mgmt_valid_hdev(hdev)) + return -ENOTSUPP; + mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); @@@ -2891,6 -2875,22 +2891,22 @@@ int mgmt_powered(struct hci_dev *hdev, if (scan) hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + u8 ssp = 1; + + hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); + } + + if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + struct hci_cp_write_le_host_supported cp; + + cp.le = 1; + cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); + + hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp); + } + update_class(hdev); update_name(hdev, hdev->dev_name); update_eir(hdev); diff --combined net/core/dev.c index 707b124,89e33a5..3e645f3 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -959,30 -959,18 +959,30 @@@ int dev_alloc_name(struct net_device *d } EXPORT_SYMBOL(dev_alloc_name);
-static int dev_get_valid_name(struct net_device *dev, const char *name) +static int dev_alloc_name_ns(struct net *net, + struct net_device *dev, + const char *name) { - struct net *net; + char buf[IFNAMSIZ]; + int ret;
- BUG_ON(!dev_net(dev)); - net = dev_net(dev); + ret = __dev_alloc_name(net, name, buf); + if (ret >= 0) + strlcpy(dev->name, buf, IFNAMSIZ); + return ret; +} + +static int dev_get_valid_name(struct net *net, + struct net_device *dev, + const char *name) +{ + BUG_ON(!net);
if (!dev_valid_name(name)) return -EINVAL;
if (strchr(name, '%')) - return dev_alloc_name(dev, name); + return dev_alloc_name_ns(net, dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; else if (dev->name != name) @@@ -1018,7 -1006,7 +1018,7 @@@ int dev_change_name(struct net_device *
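The dev_get_valid_name() rework above keys its behavior off a '%' in the requested name: a template like "eth%d" means "allocate the first free index", while a literal name must simply be unused. A userspace sketch of that split, with a stubbed in-use check (the 100-index cap and the stub are assumptions of this sketch, not the kernel's limits):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static bool name_in_use(const char *name)
{
    return strcmp(name, "eth0") == 0;    /* pretend only eth0 exists */
}

static int get_valid_name(char *dst, const char *name)
{
    if (strchr(name, '%')) {
        for (int i = 0; i < 100; i++) {
            char buf[IFNAMSIZ];

            snprintf(buf, sizeof(buf), name, i);  /* name is trusted here */
            if (!name_in_use(buf)) {
                snprintf(dst, IFNAMSIZ, "%s", buf);
                return 0;
            }
        }
        return -1;                               /* no free index */
    }
    if (name_in_use(name))
        return -1;                               /* -EEXIST in the kernel */
    snprintf(dst, IFNAMSIZ, "%s", name);
    return 0;
}

int main(void)
{
    char name[IFNAMSIZ];

    if (get_valid_name(name, "eth%d") == 0)
        printf("picked %s\n", name);             /* eth1; eth0 is taken */
    return 0;
}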
memcpy(oldname, dev->name, IFNAMSIZ);
- err = dev_get_valid_name(dev, newname); + err = dev_get_valid_name(net, dev, newname); if (err < 0) return err;
@@@ -1121,23 -1109,11 +1121,23 @@@ void netdev_state_change(struct net_dev } EXPORT_SYMBOL(netdev_state_change);
-int netdev_bonding_change(struct net_device *dev, unsigned long event) +/** + * netdev_notify_peers - notify network peers about existence of @dev + * @dev: network device + * + * Generate traffic such that interested network peers are aware of + * @dev, such as by generating a gratuitous ARP. This may be used when + * a device wants to inform the rest of the network about some sort of + * reconfiguration such as a failover event or virtual machine + * migration. + */ +void netdev_notify_peers(struct net_device *dev) { - return call_netdevice_notifiers(event, dev); + rtnl_lock(); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); + rtnl_unlock(); } -EXPORT_SYMBOL(netdev_bonding_change); +EXPORT_SYMBOL(netdev_notify_peers);
/** * dev_load - load a network module @@@ -1418,6 -1394,7 +1418,6 @@@ rollback nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); - nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } }
@@@ -1459,6 -1436,7 +1459,6 @@@ int unregister_netdevice_notifier(struc nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); - nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } } unlock: @@@ -2156,7 -2134,8 +2156,8 @@@ static bool can_checksum_protocol(netde static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { - if (!can_checksum_protocol(features, protocol)) { + if (skb->ip_summed != CHECKSUM_NONE && + !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { @@@ -2196,7 -2175,9 +2197,7 @@@ EXPORT_SYMBOL(netif_skb_features) /* * Returns true if either: * 1. skb has frag_list and the device doesn't support FRAGLIST, or - * 2. skb is fragmented and the device does not support SG, or if - * at least one of fragments is in highmem and device does not - * support DMA from it. + * 2. skb is fragmented and the device does not support SG. */ static inline int skb_needs_linearize(struct sk_buff *skb, int features) @@@ -2225,6 -2206,9 +2226,6 @@@ int dev_hard_start_xmit(struct sk_buff if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(skb);
- if (!list_empty(&ptype_all)) - dev_queue_xmit_nit(skb, dev); - features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) && @@@ -2259,9 -2243,6 +2260,9 @@@ } }
+ if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(skb, dev); + skb_len = skb->len; rc = ops->ndo_start_xmit(skb, dev); trace_net_dev_xmit(skb, rc, dev, skb_len); @@@ -2284,9 -2265,6 +2285,9 @@@ gso if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(nskb);
+ if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(nskb, dev); + skb_len = nskb->len; rc = ops->ndo_start_xmit(nskb, dev); trace_net_dev_xmit(nskb, rc, dev, skb_len); @@@ -2396,8 -2374,8 +2397,8 @@@ static inline int get_xps_queue(struct #endif }
-static struct netdev_queue *dev_pick_tx(struct net_device *dev, - struct sk_buff *skb) +struct netdev_queue *netdev_pick_tx(struct net_device *dev, + struct sk_buff *skb) { int queue_index; const struct net_device_ops *ops = dev->netdev_ops; @@@ -2571,7 -2549,7 +2572,7 @@@ int dev_queue_xmit(struct sk_buff *skb
skb_update_prio(skb);
- txq = dev_pick_tx(dev, skb); + txq = netdev_pick_tx(dev, skb); q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT @@@ -3345,7 -3323,7 +3346,7 @@@ ncls
if (pt_prev) { if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) - ret = -ENOMEM; + goto drop; else ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { @@@ -4534,8 -4512,8 +4535,8 @@@ static void dev_change_rx_flags(struct static int __dev_set_promiscuity(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; - uid_t uid; - gid_t gid; + kuid_t uid; + kgid_t gid;
ASSERT_RTNL();
@@@ -4567,8 -4545,7 +4568,8 @@@ dev->name, (dev->flags & IFF_PROMISC), (old_flags & IFF_PROMISC), audit_get_loginuid(current), - uid, gid, + from_kuid(&init_user_ns, uid), + from_kgid(&init_user_ns, gid), audit_get_sessionid(current)); }
@@@ -5261,12 -5238,12 +5262,12 @@@ int dev_ioctl(struct net *net, unsigne */ static int dev_new_index(struct net *net) { - static int ifindex; + int ifindex = net->ifindex; for (;;) { if (++ifindex <= 0) ifindex = 1; if (!__dev_get_by_index(net, ifindex)) - return ifindex; + return net->ifindex = ifindex; } }
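The dev_new_index() hunk above moves the allocation cursor from a function static into the per-namespace struct net, so each namespace advances its own ifindex sequence. A sketch of the allocator's shape, with a stubbed lookup in place of __dev_get_by_index():

#include <stdbool.h>
#include <stdio.h>

struct net {
    int ifindex;               /* per-namespace cursor, as in the hunk */
};

static bool index_in_use(struct net *net, int idx)
{
    (void)net;
    return idx == 1;           /* pretend ifindex 1 is taken */
}

static int new_index(struct net *net)
{
    int ifindex = net->ifindex;

    for (;;) {
        if (++ifindex <= 0)
            ifindex = 1;       /* skip 0 and wrap on signed overflow */
        if (!index_in_use(net, ifindex))
            return net->ifindex = ifindex;
    }
}

int main(void)
{
    struct net net = { .ifindex = 0 };

    printf("%d\n", new_index(&net));   /* 2, since 1 is taken */
    return 0;
}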
@@@ -5344,6 -5321,10 +5345,6 @@@ static void rollback_registered_many(st netdev_unregister_kobject(dev); }
- /* Process any work delayed until the end of the batch */ - dev = list_first_entry(head, struct net_device, unreg_list); - call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); - synchronize_net();
list_for_each_entry(dev, head, unreg_list) @@@ -5601,7 -5582,7 +5602,7 @@@ int register_netdevice(struct net_devic
dev->iflink = -1;
- ret = dev_get_valid_name(dev, dev->name); + ret = dev_get_valid_name(net, dev, dev->name); if (ret < 0) goto out;
@@@ -5615,12 -5596,7 +5616,12 @@@ } }
- dev->ifindex = dev_new_index(net); + ret = -EBUSY; + if (!dev->ifindex) + dev->ifindex = dev_new_index(net); + else if (__dev_get_by_index(net, dev->ifindex)) + goto err_uninit; + if (dev->iflink == -1) dev->iflink = dev->ifindex;
@@@ -5663,8 -5639,6 +5664,8 @@@
set_bit(__LINK_STATE_PRESENT, &dev->state);
+ linkwatch_init_dev(dev); + dev_init_scheduler(dev); dev_hold(dev); list_netdevice(dev); @@@ -5798,12 -5772,9 +5799,12 @@@ static void netdev_wait_allrefs(struct
/* Rebroadcast unregister notification */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users - * should have already handle it the first time */
+ __rtnl_unlock(); + rcu_barrier(); + rtnl_lock(); + + call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { /* We must not have linkwatch events @@@ -5865,8 -5836,9 +5866,8 @@@ void netdev_run_todo(void
__rtnl_unlock();
- /* Wait for rcu callbacks to finish before attempting to drain - * the device list. This usually avoids a 250ms wait. - */ + + /* Wait for rcu callbacks to finish before next phase */ if (!list_empty(&list)) rcu_barrier();
@@@ -5875,10 -5847,6 +5876,10 @@@ = list_first_entry(&list, struct net_device, todo_list); list_del(&dev->todo_list);
+ rtnl_lock(); + call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); + __rtnl_unlock(); + if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { pr_err("network todo '%s' but state %d\n", dev->name, dev->reg_state); @@@ -5974,8 -5942,6 +5975,8 @@@ struct netdev_queue *dev_ingress_queue_ return queue; }
+static const struct ethtool_ops default_ethtool_ops; + /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for @@@ -6063,8 -6029,6 +6064,8 @@@ struct net_device *alloc_netdev_mqs(in
strcpy(dev->name, name); dev->group = INIT_NETDEV_GROUP; + if (!dev->ethtool_ops) + dev->ethtool_ops = &default_ethtool_ops; return dev;
free_all: @@@ -6249,7 -6213,7 +6250,7 @@@ int dev_change_net_namespace(struct net /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(dev, pat) < 0) + if (dev_get_valid_name(net, dev, pat) < 0) goto out; }
@@@ -6277,8 -6241,7 +6278,8 @@@ the device is just moving and can keep their slaves up. */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); + rcu_barrier(); + call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/* diff --combined net/core/skbuff.c index 607a70f,e33ebae..d607bae --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -340,57 -340,43 +340,57 @@@ struct sk_buff *build_skb(void *data, u EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache { - struct page *page; - unsigned int offset; - unsigned int pagecnt_bias; + struct page_frag frag; + /* we maintain a pagecount bias, so that we don't dirty the cache line + * containing page->_count every time we allocate a fragment. + */ + unsigned int pagecnt_bias; }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES) +#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) +#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) +#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) { struct netdev_alloc_cache *nc; void *data = NULL; + int order; unsigned long flags;
local_irq_save(flags); nc = &__get_cpu_var(netdev_alloc_cache); - if (unlikely(!nc->page)) { + if (unlikely(!nc->frag.page)) { refill: - nc->page = alloc_page(gfp_mask); - if (unlikely(!nc->page)) - goto end; + for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { + gfp_t gfp = gfp_mask; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + nc->frag.page = alloc_pages(gfp, order); + if (likely(nc->frag.page)) + break; + if (--order < 0) + goto end; + } + nc->frag.size = PAGE_SIZE << order; recycle: - atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS); - nc->pagecnt_bias = NETDEV_PAGECNT_BIAS; - nc->offset = 0; + atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS); + nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; + nc->frag.offset = 0; }
- if (nc->offset + fragsz > PAGE_SIZE) { + if (nc->frag.offset + fragsz > nc->frag.size) { /* avoid unnecessary locked operations if possible */ - if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) || - atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count)) + if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) || + atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count)) goto recycle; goto refill; }
- data = page_address(nc->page) + nc->offset; - nc->offset += fragsz; + data = page_address(nc->frag.page) + nc->frag.offset; + nc->frag.offset += fragsz; nc->pagecnt_bias--; end: local_irq_restore(flags); @@@ -1669,19 -1655,38 +1669,19 @@@ static struct page *linear_to_page(stru unsigned int *offset, struct sk_buff *skb, struct sock *sk) { - struct page *p = sk->sk_sndmsg_page; - unsigned int off; - - if (!p) { -new_page: - p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); - if (!p) - return NULL; + struct page_frag *pfrag = sk_page_frag(sk);
- off = sk->sk_sndmsg_off = 0; - /* hold one ref to this page until it's full */ - } else { - unsigned int mlen; - - /* If we are the only user of the page, we can reset offset */ - if (page_count(p) == 1) - sk->sk_sndmsg_off = 0; - off = sk->sk_sndmsg_off; - mlen = PAGE_SIZE - off; - if (mlen < 64 && mlen < *len) { - put_page(p); - goto new_page; - } + if (!sk_page_frag_refill(sk, pfrag)) + return NULL;
- *len = min_t(unsigned int, *len, mlen); - } + *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
- memcpy(page_address(p) + off, page_address(page) + *offset, *len); - sk->sk_sndmsg_off += *len; - *offset = off; + memcpy(page_address(pfrag->page) + pfrag->offset, + page_address(page) + *offset, *len); + *offset = pfrag->offset; + pfrag->offset += *len;
- return p; + return pfrag->page; }
static bool spd_can_coalesce(const struct splice_pipe_desc *spd, @@@ -3497,7 -3502,9 +3497,9 @@@ bool skb_try_coalesce(struct sk_buff *t if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0;
- /* if the skb is cloned this does nothing since we set nr_frags to 0 */ + /* if the skb is not cloned this does nothing + * since we set nr_frags to 0. + */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i);
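The __netdev_alloc_frag() rework above, and sk_page_frag_refill() added in net/core/sock.c below, share the same allocation shape: try the largest compound page first and halve the order on failure, falling back to a single page. A userspace sketch of that loop, with malloc standing in for alloc_pages():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define FRAG_PAGE_MAX_ORDER 3   /* 32KB max, mirroring get_order(32768) */

static void *alloc_frag_page(size_t *size)
{
    for (int order = FRAG_PAGE_MAX_ORDER; order >= 0; order--) {
        void *p = malloc((size_t)PAGE_SIZE << order);

        if (p) {
            *size = (size_t)PAGE_SIZE << order;
            return p;
        }
        /* halve the request and retry, as the kernel loop does */
    }
    return NULL;
}

int main(void)
{
    size_t size;
    void *p = alloc_frag_page(&size);

    if (p) {
        printf("got %zu bytes\n", size);   /* 32768 unless memory is tight */
        free(p);
    }
    return 0;
}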
diff --combined net/core/sock.c index f5a4260,a6000fb..7eac864 --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -691,7 -691,8 +691,8 @@@ set_rcvbuf
case SO_KEEPALIVE: #ifdef CONFIG_INET - if (sk->sk_protocol == IPPROTO_TCP) + if (sk->sk_protocol == IPPROTO_TCP && + sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); @@@ -868,8 -869,8 +869,8 @@@ void cred_to_ucred(struct pid *pid, con if (cred) { struct user_namespace *current_ns = current_user_ns();
- ucred->uid = from_kuid(current_ns, cred->euid); - ucred->gid = from_kgid(current_ns, cred->egid); + ucred->uid = from_kuid_munged(current_ns, cred->euid); + ucred->gid = from_kgid_munged(current_ns, cred->egid); } } EXPORT_SYMBOL_GPL(cred_to_ucred); @@@ -1230,7 -1231,7 +1231,7 @@@ void sock_update_classid(struct sock *s rcu_read_lock(); /* doing current task, which cannot vanish. */ classid = task_cls_classid(current); rcu_read_unlock(); - if (classid && classid != sk->sk_classid) + if (classid != sk->sk_classid) sk->sk_classid = classid; } EXPORT_SYMBOL(sock_update_classid); @@@ -1464,6 -1465,19 +1465,6 @@@ void sk_setup_caps(struct sock *sk, str } EXPORT_SYMBOL_GPL(sk_setup_caps);
-void __init sk_init(void) -{ - if (totalram_pages <= 4096) { - sysctl_wmem_max = 32767; - sysctl_rmem_max = 32767; - sysctl_wmem_default = 32767; - sysctl_rmem_default = 32767; - } else if (totalram_pages >= 131072) { - sysctl_wmem_max = 131071; - sysctl_rmem_max = 131071; - } -} - /* * Simple resource managers for sockets. */ @@@ -1521,12 -1535,12 +1522,12 @@@ void sock_edemux(struct sk_buff *skb } EXPORT_SYMBOL(sock_edemux);
-int sock_i_uid(struct sock *sk) +kuid_t sock_i_uid(struct sock *sk) { - int uid; + kuid_t uid;
read_lock_bh(&sk->sk_callback_lock); - uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; + uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; read_unlock_bh(&sk->sk_callback_lock); return uid; } @@@ -1731,45 -1745,6 +1732,45 @@@ struct sk_buff *sock_alloc_send_skb(str } EXPORT_SYMBOL(sock_alloc_send_skb);
+/* On 32bit arches, an skb frag is limited to 2^15 */ +#define SKB_FRAG_PAGE_ORDER get_order(32768) + +bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) +{ + int order; + + if (pfrag->page) { + if (atomic_read(&pfrag->page->_count) == 1) { + pfrag->offset = 0; + return true; + } + if (pfrag->offset < pfrag->size) + return true; + put_page(pfrag->page); + } + + /* We restrict high order allocations to users that can afford to wait */ + order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0; + + do { + gfp_t gfp = sk->sk_allocation; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + pfrag->page = alloc_pages(gfp, order); + if (likely(pfrag->page)) { + pfrag->offset = 0; + pfrag->size = PAGE_SIZE << order; + return true; + } + } while (--order >= 0); + + sk_enter_memory_pressure(sk); + sk_stream_moderate_sndbuf(sk); + return false; +} +EXPORT_SYMBOL(sk_page_frag_refill); + static void __lock_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) @@@ -2199,8 -2174,8 +2200,8 @@@ void sock_init_data(struct socket *sock sk->sk_error_report = sock_def_error_report; sk->sk_destruct = sock_def_destruct;
- sk->sk_sndmsg_page = NULL; - sk->sk_sndmsg_off = 0; + sk->sk_frag.page = NULL; + sk->sk_frag.offset = 0; sk->sk_peek_off = -1;
sk->sk_peer_pid = NULL; @@@ -2443,12 -2418,6 +2444,12 @@@ void sk_common_release(struct sock *sk xfrm_sk_free_policy(sk);
sk_refcnt_debug_release(sk); + + if (sk->sk_frag.page) { + put_page(sk->sk_frag.page); + sk->sk_frag.page = NULL; + } + sock_put(sk); } EXPORT_SYMBOL(sk_common_release); diff --combined net/ipv4/devinet.c index 7b00556,e12fad7..2a6abc1 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@@ -94,22 -94,25 +94,22 @@@ static const struct nla_policy ifa_ipv4 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, };
-/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE - * value. So if you change this define, make appropriate changes to - * inet_addr_hash as well. - */ -#define IN4_ADDR_HSIZE 256 +#define IN4_ADDR_HSIZE_SHIFT 8 +#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT) + static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE]; static DEFINE_SPINLOCK(inet_addr_hash_lock);
-static inline unsigned int inet_addr_hash(struct net *net, __be32 addr) +static u32 inet_addr_hash(struct net *net, __be32 addr) { - u32 val = (__force u32) addr ^ hash_ptr(net, 8); + u32 val = (__force u32) addr ^ net_hash_mix(net);
- return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) & - (IN4_ADDR_HSIZE - 1)); + return hash_32(val, IN4_ADDR_HSIZE_SHIFT); }
static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa) { - unsigned int hash = inet_addr_hash(net, ifa->ifa_local); + u32 hash = inet_addr_hash(net, ifa->ifa_local);
spin_lock(&inet_addr_hash_lock); hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]); @@@ -133,18 -136,18 +133,18 @@@ static void inet_hash_remove(struct in_ */ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) { - unsigned int hash = inet_addr_hash(net, addr); + u32 hash = inet_addr_hash(net, addr); struct net_device *result = NULL; struct in_ifaddr *ifa; struct hlist_node *node;
rcu_read_lock(); hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) { - struct net_device *dev = ifa->ifa_dev->dev; - - if (!net_eq(dev_net(dev), net)) - continue; if (ifa->ifa_local == addr) { + struct net_device *dev = ifa->ifa_dev->dev; + + if (!net_eq(dev_net(dev), net)) + continue; result = dev; break; } @@@ -179,10 -182,10 +179,10 @@@ static void inet_del_ifa(struct in_devi static void devinet_sysctl_register(struct in_device *idev); static void devinet_sysctl_unregister(struct in_device *idev); #else -static inline void devinet_sysctl_register(struct in_device *idev) +static void devinet_sysctl_register(struct in_device *idev) { } -static inline void devinet_sysctl_unregister(struct in_device *idev) +static void devinet_sysctl_unregister(struct in_device *idev) { } #endif @@@ -202,7 -205,7 +202,7 @@@ static void inet_rcu_free_ifa(struct rc kfree(ifa); }
-static inline void inet_free_ifa(struct in_ifaddr *ifa) +static void inet_free_ifa(struct in_ifaddr *ifa) { call_rcu(&ifa->rcu_head, inet_rcu_free_ifa); } @@@ -311,7 -314,7 +311,7 @@@ int inet_addr_onlink(struct in_device * }
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, - int destroy, struct nlmsghdr *nlh, u32 pid) + int destroy, struct nlmsghdr *nlh, u32 portid) { struct in_ifaddr *promote = NULL; struct in_ifaddr *ifa, *ifa1 = *ifap; @@@ -345,7 -348,7 +345,7 @@@ inet_hash_remove(ifa); *ifap1 = ifa->ifa_next;
- rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid); + rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa); inet_free_ifa(ifa); @@@ -382,7 -385,7 +382,7 @@@ is valid, it will try to restore deleted routes... Grr. So that, this order is correct. */ - rtmsg_ifa(RTM_DELADDR, ifa1, nlh, pid); + rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) { @@@ -395,7 -398,7 +395,7 @@@ }
promote->ifa_flags &= ~IFA_F_SECONDARY; - rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); + rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote); for (ifa = next_sec; ifa; ifa = ifa->ifa_next) { @@@ -417,7 -420,7 +417,7 @@@ static void inet_del_ifa(struct in_devi }
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, - u32 pid) + u32 portid) { struct in_device *in_dev = ifa->ifa_dev; struct in_ifaddr *ifa1, **ifap, **last_primary; @@@ -464,7 -467,7 +464,7 @@@ /* Send message first, then call notifier. Notifier will trigger FIB update, so that listeners of netlink will know about new ifaddr */ - rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid); + rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
return 0; @@@ -563,7 -566,7 +563,7 @@@ static int inet_rtm_deladdr(struct sk_b !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa))) continue;
- __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid); + __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); return 0; }
@@@ -649,14 -652,14 +649,14 @@@ static int inet_rtm_newaddr(struct sk_b if (IS_ERR(ifa)) return PTR_ERR(ifa);
- return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).pid); + return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid); }
/* * Determine a default network mask, based on the IP address. */
-static inline int inet_abc_len(__be32 addr) +static int inet_abc_len(__be32 addr) { int rc = -1; /* Something else, probably a multicast. */
@@@ -722,7 -725,7 +722,7 @@@ int devinet_ioctl(struct net *net, unsi break;
case SIOCSIFFLAGS: - ret = -EACCES; + ret = -EPERM; if (!capable(CAP_NET_ADMIN)) goto out; break; @@@ -730,7 -733,7 +730,7 @@@ case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ - ret = -EACCES; + ret = -EPERM; if (!capable(CAP_NET_ADMIN)) goto out; ret = -EINVAL; @@@ -1121,7 -1124,7 +1121,7 @@@ skip } }
-static inline bool inetdev_valid_mtu(unsigned int mtu) +static bool inetdev_valid_mtu(unsigned int mtu) { return mtu >= 68; } @@@ -1236,7 -1239,7 +1236,7 @@@ static struct notifier_block ip_netdev_ .notifier_call = inetdev_event, };
-static inline size_t inet_nlmsg_size(void) +static size_t inet_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(4) /* IFA_ADDRESS */ @@@ -1246,12 -1249,12 +1246,12 @@@ }
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, - u32 pid, u32 seq, int event, unsigned int flags) + u32 portid, u32 seq, int event, unsigned int flags) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh;
- nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE;
@@@ -1313,7 -1316,7 +1313,7 @@@ static int inet_dump_ifaddr(struct sk_b if (ip_idx < s_ip_idx) continue; if (inet_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).pid, + NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) <= 0) { rcu_read_unlock(); @@@ -1335,7 -1338,7 +1335,7 @@@ done }
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, - u32 pid) + u32 portid) { struct sk_buff *skb; u32 seq = nlh ? nlh->nlmsg_seq : 0; @@@ -1347,14 -1350,14 +1347,14 @@@ if (skb == NULL) goto errout;
- err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); + err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in inet_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); + rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); return; errout: if (err < 0) diff --combined net/ipv4/raw.c index f242578,d23c657..73d1e4d --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@@ -131,18 -131,20 +131,20 @@@ found * 0 - deliver * 1 - block */ - static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) + static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { - int type; + struct icmphdr _hdr; + const struct icmphdr *hdr;
- if (!pskb_may_pull(skb, sizeof(struct icmphdr))) + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (!hdr) return 1;
- type = icmp_hdr(skb)->type; - if (type < 32) { + if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data;
- return ((1 << type) & data) != 0; + return ((1U << hdr->type) & data) != 0; }
/* Do not block unknown ICMP types */ @@@ -992,9 -994,7 +994,9 @@@ static void raw_sock_seq_show(struct se i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), + 0, 0L, 0, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), + 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); }
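The icmp_filter() hunk above does two things: it reads the header safely via skb_header_pointer() instead of assuming pskb_may_pull(), and it shifts an unsigned constant (1U) so a type of 31 cannot touch the sign bit. The filter test itself is a 32-bit mask where a set bit blocks that ICMP type; a sketch:

#include <stdint.h>
#include <stdio.h>

static int icmp_blocked(uint32_t filter, uint8_t type)
{
    if (type < 32)
        return ((1U << type) & filter) != 0;   /* unsigned shift, as fixed */
    return 0;   /* do not block unknown ICMP types */
}

int main(void)
{
    uint32_t filter = 1U << 8;                 /* block echo request */

    printf("echo request blocked: %d\n", icmp_blocked(filter, 8));
    printf("echo reply blocked:   %d\n", icmp_blocked(filter, 0));
    return 0;
}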
diff --combined net/ipv4/route.c index 940f4f4,fd9af60..ff62206 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -202,11 -202,6 +202,6 @@@ EXPORT_SYMBOL(ip_tos2prio) static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
- static inline int rt_genid(struct net *net) - { - return atomic_read(&net->ipv4.rt_genid); - } - #ifdef CONFIG_PROC_FS static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { @@@ -449,7 -444,7 +444,7 @@@ static inline bool rt_is_expired(const
void rt_cache_flush(struct net *net) { - atomic_inc(&net->ipv4.rt_genid); + rt_genid_bump(net); }
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, @@@ -1116,7 -1111,10 +1111,7 @@@ static unsigned int ipv4_mtu(const stru const struct rtable *rt = (const struct rtable *) dst; unsigned int mtu = rt->rt_pmtu;
- if (mtu && time_after_eq(jiffies, rt->dst.expires)) - mtu = 0; - - if (!mtu) + if (!mtu || time_after_eq(jiffies, rt->dst.expires)) mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu && rt_is_output_route(rt)) @@@ -1568,14 -1566,11 +1563,14 @@@ static int ip_route_input_slow(struct s if (ipv4_is_zeronet(daddr)) goto martian_destination;
- if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) { - if (ipv4_is_loopback(daddr)) + /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), + * and calls it at most once when daddr and/or saddr is a loopback address + */ + if (ipv4_is_loopback(daddr)) { + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_destination; - - if (ipv4_is_loopback(saddr)) + } else if (ipv4_is_loopback(saddr)) { + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_source; }
@@@ -1600,7 -1595,7 +1595,7 @@@
if (res.type == RTN_LOCAL) { err = fib_validate_source(skb, saddr, daddr, tos, - net->loopback_dev->ifindex, + LOOPBACK_IFINDEX, dev, in_dev, &itag); if (err < 0) goto martian_source_keep_err; @@@ -1876,7 -1871,7 +1871,7 @@@ struct rtable *__ip_route_output_key(st
orig_oif = fl4->flowi4_oif;
- fl4->flowi4_iif = net->loopback_dev->ifindex; + fl4->flowi4_iif = LOOPBACK_IFINDEX; fl4->flowi4_tos = tos & IPTOS_RT_MASK; fl4->flowi4_scope = ((tos & RTO_ONLINK) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); @@@ -1965,7 -1960,7 +1960,7 @@@ if (!fl4->daddr) fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK); dev_out = net->loopback_dev; - fl4->flowi4_oif = net->loopback_dev->ifindex; + fl4->flowi4_oif = LOOPBACK_IFINDEX; res.type = RTN_LOCAL; flags |= RTCF_LOCAL; goto make_route; @@@ -2136,7 -2131,7 +2131,7 @@@ struct rtable *ip_route_output_flow(str EXPORT_SYMBOL_GPL(ip_route_output_flow);
static int rt_fill_info(struct net *net, __be32 dst, __be32 src, - struct flowi4 *fl4, struct sk_buff *skb, u32 pid, + struct flowi4 *fl4, struct sk_buff *skb, u32 portid, u32 seq, int event, int nowait, unsigned int flags) { struct rtable *rt = skb_rtable(skb); @@@ -2146,7 -2141,7 +2141,7 @@@ u32 error; u32 metrics[RTAX_MAX];
- nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); if (nlh == NULL) return -EMSGSIZE;
@@@ -2306,12 -2301,12 +2301,12 @@@ static int inet_rtm_getroute(struct sk_ rt->rt_flags |= RTCF_NOTIFY;
err = rt_fill_info(net, dst, src, &fl4, skb, - NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, + NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); if (err <= 0) goto errout_free;
- err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: return err;
@@@ -2506,7 -2501,7 +2501,7 @@@ static __net_initdata struct pernet_ope
static __net_init int rt_genid_init(struct net *net) { - atomic_set(&net->ipv4.rt_genid, 0); + atomic_set(&net->rt_genid, 0); get_random_bytes(&net->ipv4.dev_addr_genid, sizeof(net->ipv4.dev_addr_genid)); return 0; diff --combined net/ipv4/tcp.c index 72ea475,5f64193..f32c02e --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -486,9 -486,8 +486,9 @@@ unsigned int tcp_poll(struct file *file if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLIN | POLLRDNORM | POLLRDHUP;
- /* Connected? */ - if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { + /* Connected or passive Fast Open socket? */ + if (sk->sk_state != TCP_SYN_SENT && + (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) { int target = sock_rcvlowat(sk, 0, INT_MAX);
if (tp->urg_seq == tp->copied_seq && @@@ -841,15 -840,10 +841,15 @@@ static ssize_t do_tcp_sendpages(struct ssize_t copied; long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */ - if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) + /* Wait for a connection to finish. One exception is TCP Fast Open + * (passive side) where data is allowed to be sent before a connection + * is fully established. + */ + if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && + !tcp_passive_fastopen(sk)) { if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; + }
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@@ -1048,15 -1042,10 +1048,15 @@@ int tcp_sendmsg(struct kiocb *iocb, str
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */ - if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) + /* Wait for a connection to finish. One exception is TCP Fast Open + * (passive side) where data is allowed to be sent before a connection + * is fully established. + */ + if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && + !tcp_passive_fastopen(sk)) { if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto do_error; + }
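Both the do_tcp_sendpages() and tcp_sendmsg() hunks above gate sending on the same state bitmask, now with an exception for passive TCP Fast Open, where data may flow before the handshake completes. A sketch of the test; the state values match standard TCP state numbering but are shown here only for illustration:

#include <stdbool.h>
#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_SYN_RECV = 3, TCP_CLOSE_WAIT = 8 };
#define TCPF(state) (1 << (state))

static bool may_send(int state, bool passive_fastopen)
{
    int ok_mask = TCPF(TCP_ESTABLISHED) | TCPF(TCP_CLOSE_WAIT);

    /* normally wait for the handshake; passive TFO is the exception */
    return (TCPF(state) & ok_mask) || passive_fastopen;
}

int main(void)
{
    printf("%d\n", may_send(TCP_SYN_RECV, true));   /* 1: TFO exception */
    printf("%d\n", may_send(TCP_SYN_RECV, false));  /* 0: must wait */
    return 0;
}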
if (unlikely(tp->repair)) { if (tp->repair_queue == TCP_RECV_QUEUE) { @@@ -1150,43 -1139,78 +1150,43 @@@ new_segment if (err) goto do_fault; } else { - bool merge = false; + bool merge = true; int i = skb_shinfo(skb)->nr_frags; - struct page *page = sk->sk_sndmsg_page; - int off; - - if (page && page_count(page) == 1) - sk->sk_sndmsg_off = 0; - - off = sk->sk_sndmsg_off; - - if (skb_can_coalesce(skb, i, page, off) && - off != PAGE_SIZE) { - /* We can extend the last page - * fragment. */ - merge = true; - } else if (i == MAX_SKB_FRAGS || !sg) { - /* Need to add new fragment and cannot - * do this because interface is non-SG, - * or because all the page slots are - * busy. */ - tcp_mark_push(tp, skb); - goto new_segment; - } else if (page) { - if (off == PAGE_SIZE) { - put_page(page); - sk->sk_sndmsg_page = page = NULL; - off = 0; + struct page_frag *pfrag = sk_page_frag(sk); + + if (!sk_page_frag_refill(sk, pfrag)) + goto wait_for_memory; + + if (!skb_can_coalesce(skb, i, pfrag->page, + pfrag->offset)) { + if (i == MAX_SKB_FRAGS || !sg) { + tcp_mark_push(tp, skb); + goto new_segment; } - } else - off = 0; + merge = false; + }
- if (copy > PAGE_SIZE - off) - copy = PAGE_SIZE - off; + copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, copy)) goto wait_for_memory;
- if (!page) { - /* Allocate new cache page. */ - if (!(page = sk_stream_alloc_page(sk))) - goto wait_for_memory; - } - - /* Time to copy data. We are close to - * the end! */ err = skb_copy_to_page_nocache(sk, from, skb, - page, off, copy); - if (err) { - /* If this page was new, give it to the - * socket so it does not get leaked. - */ - if (!sk->sk_sndmsg_page) { - sk->sk_sndmsg_page = page; - sk->sk_sndmsg_off = 0; - } + pfrag->page, + pfrag->offset, + copy); + if (err) goto do_error; - }
/* Update the skb. */ if (merge) { skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { - skb_fill_page_desc(skb, i, page, off, copy); - if (sk->sk_sndmsg_page) { - get_page(page); - } else if (off + copy < PAGE_SIZE) { - get_page(page); - sk->sk_sndmsg_page = page; - } + skb_fill_page_desc(skb, i, pfrag->page, + pfrag->offset, copy); + get_page(pfrag->page); } - - sk->sk_sndmsg_off = off + copy; + pfrag->offset += copy; }
if (!copied) @@@ -1738,8 -1762,14 +1738,14 @@@ int tcp_recvmsg(struct kiocb *iocb, str }
#ifdef CONFIG_NET_DMA - if (tp->ucopy.dma_chan) - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + if (tp->ucopy.dma_chan) { + if (tp->rcv_wnd == 0 && + !skb_queue_empty(&sk->sk_async_wait_queue)) { + tcp_service_net_dma(sk, true); + tcp_cleanup_rbuf(sk, copied); + } else + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + } #endif if (copied >= target) { /* Do not sleep, just process backlog. */ @@@ -2120,10 -2150,6 +2126,10 @@@ void tcp_close(struct sock *sk, long ti * they look as CLOSING or LAST_ACK for Linux) * Probably, I missed some more holelets. * --ANK + * XXX (TFO) - To start off we don't support SYN+ACK+FIN + * in a single packet! (May consider it later but will + * probably need API support or TCP_CORK SYN-ACK until + * data is written and socket is closed.) */ tcp_send_fin(sk); } @@@ -2195,16 -2221,8 +2201,16 @@@ adjudge_to_death } }
- if (sk->sk_state == TCP_CLOSE) + if (sk->sk_state == TCP_CLOSE) { + struct request_sock *req = tcp_sk(sk)->fastopen_rsk; + /* We could get here with a non-NULL req if the socket is + * aborted (e.g., closed with unread data) before 3WHS + * finishes. + */ + if (req != NULL) + reqsk_fastopen_remove(sk, req, false); inet_csk_destroy_sock(sk); + } /* Otherwise, socket is reprieved until protocol close. */
out: @@@ -2290,13 -2308,6 +2296,13 @@@ int tcp_disconnect(struct sock *sk, in } EXPORT_SYMBOL(tcp_disconnect);
+void tcp_sock_destruct(struct sock *sk) +{ + inet_sock_destruct(sk); + + kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); +} + static inline bool tcp_can_repair_sock(const struct sock *sk) { return capable(CAP_NET_ADMIN) && @@@ -2320,10 -2331,17 +2326,17 @@@ static int tcp_repair_options_est(struc tp->rx_opt.mss_clamp = opt.opt_val; break; case TCPOPT_WINDOW: - if (opt.opt_val > 14) - return -EFBIG; + { + u16 snd_wscale = opt.opt_val & 0xFFFF; + u16 rcv_wscale = opt.opt_val >> 16; + + if (snd_wscale > 14 || rcv_wscale > 14) + return -EFBIG;
- tp->rx_opt.snd_wscale = opt.opt_val; + tp->rx_opt.snd_wscale = snd_wscale; + tp->rx_opt.rcv_wscale = rcv_wscale; + tp->rx_opt.wscale_ok = 1; + } break; case TCPOPT_SACK_PERM: if (opt.opt_val != 0) @@@ -2683,14 -2701,6 +2696,14 @@@ static int do_tcp_setsockopt(struct soc else icsk->icsk_user_timeout = msecs_to_jiffies(val); break; + + case TCP_FASTOPEN: + if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | + TCPF_LISTEN))) + err = fastopen_init_queue(sk, val); + else + err = -EINVAL; + break; default: err = -ENOPROTOOPT; break; @@@ -3504,15 -3514,11 +3517,15 @@@ EXPORT_SYMBOL(tcp_cookie_generator)
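[Editor's sketch] The TCPOPT_WINDOW repair option above now carries both scale factors in one 32-bit value: the send scale in the low 16 bits, the receive scale in the high 16 bits, each capped at 14. A sketch of how a checkpoint/restore tool might pack and validate the value (the setsockopt plumbing is omitted; the function name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the unpacking in tcp_repair_options_est() above. */
static int pack_window_opt(uint16_t snd_wscale, uint16_t rcv_wscale,
			   uint32_t *opt_val)
{
	if (snd_wscale > 14 || rcv_wscale > 14)
		return -1;              /* the kernel would answer -EFBIG */
	*opt_val = (uint32_t)snd_wscale | ((uint32_t)rcv_wscale << 16);
	return 0;
}

int main(void)
{
	uint32_t v;
	if (pack_window_opt(7, 9, &v) == 0)
		printf("opt_val=0x%08x snd=%u rcv=%u\n",
		       v, v & 0xFFFF, v >> 16);
	return 0;
}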
void tcp_done(struct sock *sk) { + struct request_sock *req = tcp_sk(sk)->fastopen_rsk; + if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
tcp_set_state(sk, TCP_CLOSE); tcp_clear_xmit_timers(sk); + if (req != NULL) + reqsk_fastopen_remove(sk, req, false);
sk->sk_shutdown = SHUTDOWN_MASK;
diff --combined net/ipv4/tcp_input.c index e037697,d377f48..432c366 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -237,11 -237,7 +237,11 @@@ static inline void TCP_ECN_check_ce(str tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { + /* Better not delay acks, sender can have a very low cwnd */ + tcp_enter_quickack_mode((struct sock *)tp); + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + } /* fallinto */ default: tp->ecn_flags |= TCP_ECN_SEEN; @@@ -378,7 -374,7 +378,7 @@@ static void tcp_fixup_rcvbuf(struct soc /* 4. Try to fixup all. It is made immediately after connection enters * established state. */ -static void tcp_init_buffer_space(struct sock *sk) +void tcp_init_buffer_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int maxwin; @@@ -743,6 -739,29 +743,6 @@@ __u32 tcp_init_cwnd(const struct tcp_so return min_t(__u32, cwnd, tp->snd_cwnd_clamp); }
-/* Set slow start threshold and cwnd not falling to slow start */ -void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) -{ - struct tcp_sock *tp = tcp_sk(sk); - const struct inet_connection_sock *icsk = inet_csk(sk); - - tp->prior_ssthresh = 0; - tp->bytes_acked = 0; - if (icsk->icsk_ca_state < TCP_CA_CWR) { - tp->undo_marker = 0; - if (set_ssthresh) - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - tp->snd_cwnd = min(tp->snd_cwnd, - tcp_packets_in_flight(tp) + 1U); - tp->snd_cwnd_cnt = 0; - tp->high_seq = tp->snd_nxt; - tp->snd_cwnd_stamp = tcp_time_stamp; - TCP_ECN_queue_cwr(tp); - - tcp_set_ca_state(sk, TCP_CA_CWR); - } -} - /* * Packet counting of FACK is based on in-order assumptions, therefore TCP * disables it when reordering is detected @@@ -2470,6 -2489,35 +2470,6 @@@ static inline void tcp_moderate_cwnd(st tp->snd_cwnd_stamp = tcp_time_stamp; }
-/* Lower bound on congestion window is slow start threshold - * unless congestion avoidance choice decides to overide it. - */ -static inline u32 tcp_cwnd_min(const struct sock *sk) -{ - const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; - - return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh; -} - -/* Decrease cwnd each second ack. */ -static void tcp_cwnd_down(struct sock *sk, int flag) -{ - struct tcp_sock *tp = tcp_sk(sk); - int decr = tp->snd_cwnd_cnt + 1; - - if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) || - (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) { - tp->snd_cwnd_cnt = decr & 1; - decr >>= 1; - - if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) - tp->snd_cwnd -= decr; - - tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); - tp->snd_cwnd_stamp = tcp_time_stamp; - } -} - /* Nothing was retransmitted or returned timestamp is less * than timestamp of the first retransmission. */ @@@ -2671,80 -2719,24 +2671,80 @@@ static bool tcp_try_undo_loss(struct so return false; }
-static inline void tcp_complete_cwr(struct sock *sk) +/* The cwnd reduction in CWR and Recovery uses the PRR algorithm + * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction... + * It computes the number of packets to send (sndcnt) based on packets newly + * delivered: + * 1) If the packets in flight is larger than ssthresh, PRR spreads the + * cwnd reductions across a full RTT. + * 2) If packets in flight is lower than ssthresh (such as due to excess + * losses and/or application stalls), do not perform any further cwnd + * reductions, but instead slow start up to ssthresh. + */ +static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh) { struct tcp_sock *tp = tcp_sk(sk);
- /* Do not moderate cwnd if it's already undone in cwr or recovery. */ - if (tp->undo_marker) { - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - tp->snd_cwnd_stamp = tcp_time_stamp; - } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { - /* PRR algorithm. */ - tp->snd_cwnd = tp->snd_ssthresh; - tp->snd_cwnd_stamp = tcp_time_stamp; - } + tp->high_seq = tp->snd_nxt; + tp->bytes_acked = 0; + tp->snd_cwnd_cnt = 0; + tp->prior_cwnd = tp->snd_cwnd; + tp->prr_delivered = 0; + tp->prr_out = 0; + if (set_ssthresh) + tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); + TCP_ECN_queue_cwr(tp); +} + +static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, + int fast_rexmit) +{ + struct tcp_sock *tp = tcp_sk(sk); + int sndcnt = 0; + int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); + + tp->prr_delivered += newly_acked_sacked; + if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { + u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + + tp->prior_cwnd - 1; + sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; + } else { + sndcnt = min_t(int, delta, + max_t(int, tp->prr_delivered - tp->prr_out, + newly_acked_sacked) + 1); + } + + sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); + tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; +} + +static inline void tcp_end_cwnd_reduction(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ + if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || + (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { + tp->snd_cwnd = tp->snd_ssthresh; + tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); }
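[Editor's sketch] tcp_cwnd_reduction() above is the PRR core: while flight exceeds ssthresh the reduction is spread proportionally across one RTT; otherwise it slow-starts back toward ssthresh. A standalone model of the sndcnt arithmetic, same formulas as the hunk with the kernel state flattened into plain variables:

#include <stdint.h>
#include <stdio.h>

struct prr {
	uint32_t ssthresh, prior_cwnd, in_flight;
	uint32_t prr_delivered, prr_out;
};

/* Same computation as tcp_cwnd_reduction() in the hunk above. */
static uint32_t prr_cwnd(struct prr *p, int newly_acked_sacked, int fast_rexmit)
{
	int sndcnt;
	int delta = (int)p->ssthresh - (int)p->in_flight;

	p->prr_delivered += newly_acked_sacked;
	if (p->in_flight > p->ssthresh) {
		/* Proportional phase: scale deliveries by ssthresh/prior_cwnd. */
		uint64_t dividend = (uint64_t)p->ssthresh * p->prr_delivered +
				    p->prior_cwnd - 1;
		sndcnt = (int)(dividend / p->prior_cwnd) - (int)p->prr_out;
	} else {
		/* Slow-start reduction bound: catch back up to ssthresh. */
		int bound = (int)(p->prr_delivered - p->prr_out);
		if (bound < newly_acked_sacked)
			bound = newly_acked_sacked;
		sndcnt = delta < bound + 1 ? delta : bound + 1;
	}
	if (sndcnt < (fast_rexmit ? 1 : 0))
		sndcnt = fast_rexmit ? 1 : 0;
	return p->in_flight + sndcnt;
}

int main(void)
{
	struct prr p = { .ssthresh = 10, .prior_cwnd = 20,
			 .in_flight = 18, .prr_delivered = 0, .prr_out = 0 };
	/* One ACK delivering 2 segments early in recovery. */
	printf("cwnd -> %u\n", prr_cwnd(&p, 2, 1));
	return 0;
}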
+/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ +void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) +{ + struct tcp_sock *tp = tcp_sk(sk); + + tp->prior_ssthresh = 0; + tp->bytes_acked = 0; + if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { + tp->undo_marker = 0; + tcp_init_cwnd_reduction(sk, set_ssthresh); + tcp_set_ca_state(sk, TCP_CA_CWR); + } +} + static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@@ -2759,7 -2751,7 +2759,7 @@@ } }
-static void tcp_try_to_open(struct sock *sk, int flag) +static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked) { struct tcp_sock *tp = tcp_sk(sk);
@@@ -2776,7 -2768,7 +2776,7 @@@ if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) tcp_moderate_cwnd(tp); } else { - tcp_cwnd_down(sk, flag); + tcp_cwnd_reduction(sk, newly_acked_sacked, 0); } }
@@@ -2858,6 -2850,38 +2858,6 @@@ void tcp_simple_retransmit(struct sock } EXPORT_SYMBOL(tcp_simple_retransmit);
-/* This function implements the PRR algorithm, specifcally the PRR-SSRB - * (proportional rate reduction with slow start reduction bound) as described in - * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt. - * It computes the number of packets to send (sndcnt) based on packets newly - * delivered: - * 1) If the packets in flight is larger than ssthresh, PRR spreads the - * cwnd reductions across a full RTT. - * 2) If packets in flight is lower than ssthresh (such as due to excess - * losses and/or application stalls), do not perform any further cwnd - * reductions, but instead slow start up to ssthresh. - */ -static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, - int fast_rexmit, int flag) -{ - struct tcp_sock *tp = tcp_sk(sk); - int sndcnt = 0; - int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); - - if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { - u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + - tp->prior_cwnd - 1; - sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; - } else { - sndcnt = min_t(int, delta, - max_t(int, tp->prr_delivered - tp->prr_out, - newly_acked_sacked) + 1); - } - - sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); - tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; -} - static void tcp_enter_recovery(struct sock *sk, bool ece_ack) { struct tcp_sock *tp = tcp_sk(sk); @@@ -2870,6 -2894,7 +2870,6 @@@
NET_INC_STATS_BH(sock_net(sk), mib_idx);
- tp->high_seq = tp->snd_nxt; tp->prior_ssthresh = 0; tp->undo_marker = tp->snd_una; tp->undo_retrans = tp->retrans_out; @@@ -2877,8 -2902,15 +2877,8 @@@ if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (!ece_ack) tp->prior_ssthresh = tcp_current_ssthresh(sk); - tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); - TCP_ECN_queue_cwr(tp); + tcp_init_cwnd_reduction(sk, true); } - - tp->bytes_acked = 0; - tp->snd_cwnd_cnt = 0; - tp->prior_cwnd = tp->snd_cwnd; - tp->prr_delivered = 0; - tp->prr_out = 0; tcp_set_ca_state(sk, TCP_CA_Recovery); }
@@@ -2938,7 -2970,7 +2938,7 @@@ static void tcp_fastretrans_alert(struc /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ if (tp->snd_una != tp->high_seq) { - tcp_complete_cwr(sk); + tcp_end_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; @@@ -2948,7 -2980,7 +2948,7 @@@ tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; - tcp_complete_cwr(sk); + tcp_end_cwnd_reduction(sk); break; } } @@@ -2989,7 -3021,7 +2989,7 @@@ tcp_try_undo_dsack(sk);
if (!tcp_time_to_recover(sk, flag)) { - tcp_try_to_open(sk, flag); + tcp_try_to_open(sk, flag, newly_acked_sacked); return; }
@@@ -3011,7 -3043,8 +3011,7 @@@
if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) tcp_update_scoreboard(sk, fast_rexmit); - tp->prr_delivered += newly_acked_sacked; - tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag); + tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit); tcp_xmit_retransmit_queue(sk); }
@@@ -3090,12 -3123,6 +3090,12 @@@ void tcp_rearm_rto(struct sock *sk { struct tcp_sock *tp = tcp_sk(sk);
+ /* If the retrans timer is currently being used by Fast Open + * for SYN-ACK retrans purpose, stay put. + */ + if (tp->fastopen_rsk) + return; + if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { @@@ -3357,7 -3384,7 +3357,7 @@@ static inline bool tcp_may_raise_cwnd(c { const struct tcp_sock *tp = tcp_sk(sk); return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && - !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); + !tcp_in_cwnd_reduction(sk); }
/* Check that window update is acceptable. @@@ -3425,9 -3452,9 +3425,9 @@@ static void tcp_conservative_spur_to_re }
/* A conservative spurious RTO response algorithm: reduce cwnd using - * rate halving and continue in congestion avoidance. + * PRR and continue in congestion avoidance. */ -static void tcp_ratehalving_spur_to_response(struct sock *sk) +static void tcp_cwr_spur_to_response(struct sock *sk) { tcp_enter_cwr(sk, 0); } @@@ -3435,7 -3462,7 +3435,7 @@@ static void tcp_undo_spur_to_response(struct sock *sk, int flag) { if (flag & FLAG_ECE) - tcp_ratehalving_spur_to_response(sk); + tcp_cwr_spur_to_response(sk); else tcp_undo_cwr(sk, true); } @@@ -3542,7 -3569,7 +3542,7 @@@ static bool tcp_process_frto(struct soc tcp_conservative_spur_to_response(tp); break; default: - tcp_ratehalving_spur_to_response(sk); + tcp_cwr_spur_to_response(sk); break; } tp->frto_counter = 0; @@@ -4007,7 -4034,7 +4007,7 @@@ static inline bool tcp_sequence(const s }
/* When we get a reset we do this. */ -static void tcp_reset(struct sock *sk) +void tcp_reset(struct sock *sk) { /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { @@@ -4634,7 -4661,7 +4634,7 @@@ queue_and_out
if (eaten > 0) kfree_skb_partial(skb, fragstolen); - else if (!sock_flag(sk, SOCK_DEAD)) + if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } @@@ -5529,8 -5556,7 +5529,7 @@@ no_ack #endif if (eaten) kfree_skb_partial(skb, fragstolen); - else - sk->sk_data_ready(sk, 0); + sk->sk_data_ready(sk, 0); return 0; } } @@@ -5714,7 -5740,7 +5713,7 @@@ static int tcp_rcv_synsent_state_proces
TCP_ECN_rcv_synack(tp, th);
- tp->snd_wl1 = TCP_SKB_CB(skb)->seq; + tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); tcp_ack(sk, skb, FLAG_SLOWPATH);
/* Ok.. it's good. Set up sequence numbers and @@@ -5727,6 -5753,7 +5726,6 @@@ * never scaled. */ tp->snd_wnd = ntohs(th->window); - tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; @@@ -5864,9 -5891,7 +5863,9 @@@ discard tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. - * There are no obstacles to make this. + * There are no obstacles to make this (except that we must + * either change tcp_recvmsg() to prevent it from returning data + * before 3WHS completes per RFC793, or employ TCP Fast Open). * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. @@@ -5906,7 -5931,6 +5905,7 @@@ int tcp_rcv_state_process(struct sock * { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); + struct request_sock *req; int queued = 0;
tp->rx_opt.saw_tstamp = 0; @@@ -5962,14 -5986,6 +5961,14 @@@ return 0; }
+ req = tp->fastopen_rsk; + if (req != NULL) { + BUG_ON(sk->sk_state != TCP_SYN_RECV && + sk->sk_state != TCP_FIN_WAIT1); + + if (tcp_check_req(sk, skb, req, NULL, true) == NULL) + goto discard; + } if (!tcp_validate_incoming(sk, skb, th, 0)) return 0;
@@@ -5980,25 -5996,7 +5979,25 @@@ switch (sk->sk_state) { case TCP_SYN_RECV: if (acceptable) { - tp->copied_seq = tp->rcv_nxt; + /* Once we leave TCP_SYN_RECV, we no longer + * need req so release it. + */ + if (req) { + tcp_synack_rtt_meas(sk, req); + tp->total_retrans = req->retrans; + + reqsk_fastopen_remove(sk, req, false); + } else { + /* Make sure socket is routed, for + * correct metrics. + */ + icsk->icsk_af_ops->rebuild_header(sk); + tcp_init_congestion_control(sk); + + tcp_mtup_init(sk); + tcp_init_buffer_space(sk); + tp->copied_seq = tp->rcv_nxt; + } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); @@@ -6020,27 -6018,23 +6019,27 @@@ if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
- /* Make sure socket is routed, for - * correct metrics. - */ - icsk->icsk_af_ops->rebuild_header(sk); - - tcp_init_metrics(sk); - - tcp_init_congestion_control(sk); + if (req) { + /* Re-arm the timer because data may + * have been sent out. This is similar + * to the regular data transmission case + * when new data has just been ack'ed. + * + * (TFO) - we could try to be more + * aggressive and retransmit any data + * sooner based on when they were sent + * out. + */ + tcp_rearm_rto(sk); + } else + tcp_init_metrics(sk);
/* Prevent spurious tcp_cwnd_restart() on * first data packet. */ tp->lsndtime = tcp_time_stamp;
- tcp_mtup_init(sk); tcp_initialize_rcv_mss(sk); - tcp_init_buffer_space(sk); tcp_fast_path_on(tp); } else { return 1; @@@ -6048,16 -6042,6 +6047,16 @@@ break;
case TCP_FIN_WAIT1: + /* If we enter the TCP_FIN_WAIT1 state and we are a + * Fast Open socket and this is the first acceptable + * ACK we have received, this would have acknowledged + * our SYNACK so stop the SYNACK timer. + */ + if (acceptable && req != NULL) { + /* We no longer need the request sock. */ + reqsk_fastopen_remove(sk, req, false); + tcp_rearm_rto(sk); + } if (tp->snd_una == tp->write_seq) { struct dst_entry *dst;
diff --combined net/ipv6/raw.c index 7af88ef,4a5f78b..d8e95c7 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@@ -107,21 -107,20 +107,20 @@@ found * 0 - deliver * 1 - block */ - static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) + static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) { - struct icmp6hdr *icmph; - struct raw6_sock *rp = raw6_sk(sk); + struct icmp6hdr *_hdr; + const struct icmp6hdr *hdr;
- if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { - __u32 *data = &rp->filter.data[0]; - int bit_nr; + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (hdr) { + const __u32 *data = &raw6_sk(sk)->filter.data[0]; + unsigned int type = hdr->icmp6_type;
- icmph = (struct icmp6hdr *) skb->data; - bit_nr = icmph->icmp6_type; - - return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; + return (data[type >> 5] & (1U << (type & 31))) != 0; } - return 0; + return 1; }
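[Editor's sketch] The rewritten icmpv6_filter() treats the raw socket's filter as a 256-bit bitmap indexed by ICMPv6 type, and now blocks (returns 1) when the header cannot be read rather than delivering it. The bit test in isolation:

#include <stdint.h>
#include <stdio.h>

/* 256-bit filter, one bit per ICMPv6 type, as in the raw socket filter. */
static uint32_t filter_data[8];

static void block_type(unsigned int type)
{
	filter_data[type >> 5] |= 1U << (type & 31);
}

/* Same test as the patched icmpv6_filter(): non-zero means "block". */
static int is_blocked(unsigned int type)
{
	return (filter_data[type >> 5] & (1U << (type & 31))) != 0;
}

int main(void)
{
	block_type(128);                 /* e.g. echo request */
	printf("type 128 blocked: %d\n", is_blocked(128));
	printf("type 129 blocked: %d\n", is_blocked(129));
	return 0;
}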
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) @@@ -1251,8 -1250,7 +1250,8 @@@ static void raw6_sock_seq_show(struct s sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, - sock_i_uid(sp), 0, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), + 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } diff --combined net/ipv6/route.c index 0607ee3,854e401..d1ddbc6 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -222,11 -222,11 +222,11 @@@ static const u32 ip6_template_metrics[R [RTAX_HOPLIMIT - 1] = 255, };
-static struct rt6_info ip6_null_entry_template = { +static const struct rt6_info ip6_null_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -ENETUNREACH, .input = ip6_pkt_discard, .output = ip6_pkt_discard_out, @@@ -242,11 -242,11 +242,11 @@@ static int ip6_pkt_prohibit(struct sk_buff *skb); static int ip6_pkt_prohibit_out(struct sk_buff *skb);
-static struct rt6_info ip6_prohibit_entry_template = { +static const struct rt6_info ip6_prohibit_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EACCES, .input = ip6_pkt_prohibit, .output = ip6_pkt_prohibit_out, @@@ -257,11 -257,11 +257,11 @@@ .rt6i_ref = ATOMIC_INIT(1), };
-static struct rt6_info ip6_blk_hole_entry_template = { +static const struct rt6_info ip6_blk_hole_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EINVAL, .input = dst_discard, .output = dst_discard, @@@ -281,13 -281,14 +281,14 @@@ static inline struct rt6_info *ip6_dst_ struct fib6_table *table) { struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, - 0, DST_OBSOLETE_NONE, flags); + 0, DST_OBSOLETE_FORCE_CHK, flags);
if (rt) { struct dst_entry *dst = &rt->dst;
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); + rt->rt6i_genid = rt_genid(net); } return rt; } @@@ -369,11 -370,15 +370,11 @@@ static void ip6_dst_ifdown(struct dst_e
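[Editor's sketch] ip6_dst_alloc() now stamps each route with the namespace's generation counter, and ip6_dst_check() (further down) returns NULL once rt_genid() has moved on, forcing callers to relook the route up; the xfrm change in this same merge bumps the counter when policies change. The pattern in miniature, assuming a single global counter:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint net_genid;           /* kernel: per-netns rt_genid */

struct cached_route {
	unsigned int genid;             /* kernel: rt->rt6i_genid */
	int ifindex;
};

static void route_init(struct cached_route *rt, int ifindex)
{
	rt->genid = atomic_load(&net_genid);   /* stamp at allocation */
	rt->ifindex = ifindex;
}

/* kernel: ip6_dst_check() returning NULL forces a fresh lookup. */
static struct cached_route *route_check(struct cached_route *rt)
{
	if (rt->genid != atomic_load(&net_genid))
		return NULL;
	return rt;
}

int main(void)
{
	struct cached_route rt;
	route_init(&rt, 2);
	printf("valid before bump: %s\n", route_check(&rt) ? "yes" : "no");
	atomic_fetch_add(&net_genid, 1);       /* kernel: rt_genid_bump() */
	printf("valid after bump:  %s\n", route_check(&rt) ? "yes" : "no");
	return 0;
}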
static bool rt6_check_expired(const struct rt6_info *rt) { - struct rt6_info *ort = NULL; - if (rt->rt6i_flags & RTF_EXPIRES) { if (time_after(jiffies, rt->dst.expires)) return true; } else if (rt->dst.from) { - ort = (struct rt6_info *) rt->dst.from; - return (ort->rt6i_flags & RTF_EXPIRES) && - time_after(jiffies, ort->dst.expires); + return rt6_check_expired((struct rt6_info *) rt->dst.from); } return false; } @@@ -447,9 -452,10 +448,9 @@@ static void rt6_probe(struct rt6_info * * Router Reachability Probe MUST be rate-limited * to no more than one per minute. */ - rcu_read_lock(); neigh = rt ? rt->n : NULL; if (!neigh || (neigh->nud_state & NUD_VALID)) - goto out; + return; read_lock_bh(&neigh->lock); if (!(neigh->nud_state & NUD_VALID) && time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { @@@ -465,6 -471,8 +466,6 @@@ } else { read_unlock_bh(&neigh->lock); } -out: - rcu_read_unlock(); } #else static inline void rt6_probe(struct rt6_info *rt) @@@ -491,6 -499,7 +492,6 @@@ static inline int rt6_check_neigh(struc struct neighbour *neigh; int m;
- rcu_read_lock(); neigh = rt->n; if (rt->rt6i_flags & RTF_NONEXTHOP || !(rt->rt6i_flags & RTF_GATEWAY)) @@@ -508,6 -517,7 +509,6 @@@ read_unlock_bh(&neigh->lock); } else m = 0; - rcu_read_unlock(); return m; }
@@@ -956,7 -966,7 +957,7 @@@ struct dst_entry * ip6_route_output(str { int flags = 0;
- fl6->flowi6_iif = net->loopback_dev->ifindex; + fl6->flowi6_iif = LOOPBACK_IFINDEX;
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) flags |= RT6_LOOKUP_F_IFACE; @@@ -1022,6 -1032,13 +1023,13 @@@ static struct dst_entry *ip6_dst_check(
rt = (struct rt6_info *) dst;
+ /* All IPV6 dsts are created with ->obsolete set to the value + * DST_OBSOLETE_FORCE_CHK which forces validation calls down + * into this function always. + */ + if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) + return NULL; + if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { if (rt->rt6i_peer_genid != rt6_peer_genid()) { if (!rt6_has_peer(rt)) @@@ -1388,8 -1405,6 +1396,6 @@@ int ip6_route_add(struct fib6_config *c goto out; }
- rt->dst.obsolete = -1; - if (cfg->fc_flags & RTF_EXPIRES) rt6_set_expires(rt, jiffies + clock_t_to_jiffies(cfg->fc_expires)); @@@ -1454,21 -1469,8 +1460,21 @@@ } rt->dst.output = ip6_pkt_discard_out; rt->dst.input = ip6_pkt_discard; - rt->dst.error = -ENETUNREACH; rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; + switch (cfg->fc_type) { + case RTN_BLACKHOLE: + rt->dst.error = -EINVAL; + break; + case RTN_PROHIBIT: + rt->dst.error = -EACCES; + break; + case RTN_THROW: + rt->dst.error = -EAGAIN; + break; + default: + rt->dst.error = -ENETUNREACH; + break; + } goto install_route; }
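[Editor's sketch] ip6_route_add() above now distinguishes reject routes by type, and rt6_fill_node() (below) inverts the same mapping when dumping, so blackhole/prohibit/throw routes round-trip over netlink instead of all reporting as unreachable. Both directions of the mapping as plain functions (the RTN_* enum values here are illustrative; the real constants live in the rtnetlink headers):

#include <errno.h>
#include <stdio.h>

enum { RTN_UNREACHABLE, RTN_BLACKHOLE, RTN_PROHIBIT, RTN_THROW };

/* Forward mapping from ip6_route_add() in the hunk above. */
static int type_to_error(int fc_type)
{
	switch (fc_type) {
	case RTN_BLACKHOLE: return -EINVAL;
	case RTN_PROHIBIT:  return -EACCES;
	case RTN_THROW:     return -EAGAIN;
	default:            return -ENETUNREACH;
	}
}

/* Reverse mapping from rt6_fill_node() further down. */
static int error_to_type(int error)
{
	switch (error) {
	case -EINVAL: return RTN_BLACKHOLE;
	case -EACCES: return RTN_PROHIBIT;
	case -EAGAIN: return RTN_THROW;
	default:      return RTN_UNREACHABLE;
	}
}

int main(void)
{
	for (int t = RTN_UNREACHABLE; t <= RTN_THROW; t++)
		printf("type %d -> error %d -> type %d\n",
		       t, type_to_error(t), error_to_type(type_to_error(t)));
	return 0;
}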
@@@ -1833,7 -1835,7 +1839,7 @@@ static struct rt6_info *rt6_get_route_i if (!table) return NULL;
- write_lock_bh(&table->tb6_lock); + read_lock_bh(&table->tb6_lock); fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0); if (!fn) goto out; @@@ -1849,7 -1851,7 +1855,7 @@@ break; } out: - write_unlock_bh(&table->tb6_lock); + read_unlock_bh(&table->tb6_lock); return rt; }
@@@ -1865,7 -1867,7 +1871,7 @@@ static struct rt6_info *rt6_add_route_i .fc_dst_len = prefixlen, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref), - .fc_nlinfo.pid = 0, + .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = net, }; @@@ -1892,7 -1894,7 +1898,7 @@@ struct rt6_info *rt6_get_dflt_router(co if (!table) return NULL;
- write_lock_bh(&table->tb6_lock); + read_lock_bh(&table->tb6_lock); for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { if (dev == rt->dst.dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && @@@ -1901,7 -1903,7 +1907,7 @@@ } if (rt) dst_hold(&rt->dst); - write_unlock_bh(&table->tb6_lock); + read_unlock_bh(&table->tb6_lock); return rt; }
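[Editor's sketch] rt6_get_route_info() and rt6_get_dflt_router() only walk the table, so the patch downgrades them from write_lock_bh() to read_lock_bh(), letting concurrent lookups run in parallel. A small pthreads analogue of why the reader side suffices for a pure lookup:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tb6_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

/* Lookup-only path: many threads may hold the read lock at once. */
static int table_find(int key)
{
	int v = -1;
	pthread_rwlock_rdlock(&tb6_lock);
	if (key >= 0 && key < 4)
		v = table[key];
	pthread_rwlock_unlock(&tb6_lock);
	return v;
}

/* Modifiers still need the exclusive lock. */
static void table_set(int key, int val)
{
	pthread_rwlock_wrlock(&tb6_lock);
	table[key] = val;
	pthread_rwlock_unlock(&tb6_lock);
}

int main(void)
{
	table_set(1, 99);
	printf("table[1] = %d\n", table_find(1));
	return 0;
}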
@@@ -1915,7 -1917,7 +1921,7 @@@ struct rt6_info *rt6_add_dflt_router(co .fc_ifindex = dev->ifindex, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), - .fc_nlinfo.pid = 0, + .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = dev_net(dev), }; @@@ -2084,7 -2086,6 +2090,6 @@@ struct rt6_info *addrconf_dst_alloc(str rt->dst.input = ip6_input; rt->dst.output = ip6_output; rt->rt6i_idev = idev; - rt->dst.obsolete = -1;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; if (anycast) @@@ -2265,18 -2266,14 +2270,18 @@@ static int rtm_to_fib6_config(struct sk cfg->fc_src_len = rtm->rtm_src_len; cfg->fc_flags = RTF_UP; cfg->fc_protocol = rtm->rtm_protocol; + cfg->fc_type = rtm->rtm_type;
- if (rtm->rtm_type == RTN_UNREACHABLE) + if (rtm->rtm_type == RTN_UNREACHABLE || + rtm->rtm_type == RTN_BLACKHOLE || + rtm->rtm_type == RTN_PROHIBIT || + rtm->rtm_type == RTN_THROW) cfg->fc_flags |= RTF_REJECT;
if (rtm->rtm_type == RTN_LOCAL) cfg->fc_flags |= RTF_LOCAL;
- cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; + cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid; cfg->fc_nlinfo.nlh = nlh; cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@@ -2367,7 -2364,7 +2372,7 @@@ static inline size_t rt6_nlmsg_size(voi static int rt6_fill_node(struct net *net, struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, - int iif, int type, u32 pid, u32 seq, + int iif, int type, u32 portid, u32 seq, int prefix, int nowait, unsigned int flags) { struct rtmsg *rtm; @@@ -2383,7 -2380,7 +2388,7 @@@ } }
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags); + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE;
@@@ -2399,22 -2396,8 +2404,22 @@@ rtm->rtm_table = table; if (nla_put_u32(skb, RTA_TABLE, table)) goto nla_put_failure; - if (rt->rt6i_flags & RTF_REJECT) - rtm->rtm_type = RTN_UNREACHABLE; + if (rt->rt6i_flags & RTF_REJECT) { + switch (rt->dst.error) { + case -EINVAL: + rtm->rtm_type = RTN_BLACKHOLE; + break; + case -EACCES: + rtm->rtm_type = RTN_PROHIBIT; + break; + case -EAGAIN: + rtm->rtm_type = RTN_THROW; + break; + default: + rtm->rtm_type = RTN_UNREACHABLE; + break; + } + } else if (rt->rt6i_flags & RTF_LOCAL) rtm->rtm_type = RTN_LOCAL; else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) @@@ -2487,11 -2470,15 +2492,11 @@@ if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) goto nla_put_failure;
- rcu_read_lock(); n = rt->n; if (n) { - if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { - rcu_read_unlock(); + if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) goto nla_put_failure; - } } - rcu_read_unlock();
if (rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) @@@ -2524,7 -2511,7 +2529,7 @@@ int rt6_dump_route(struct rt6_info *rt
return rt6_fill_node(arg->net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, - NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, + NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, prefix, 0, NLM_F_MULTI); }
@@@ -2604,14 -2591,14 +2609,14 @@@ static int inet6_rtm_getroute(struct sk skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, - RTM_NEWROUTE, NETLINK_CB(in_skb).pid, + RTM_NEWROUTE, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0, 0, 0); if (err < 0) { kfree_skb(skb); goto errout; }
- err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: return err; } @@@ -2631,14 -2618,14 +2636,14 @@@ void inet6_rt_notify(int event, struct goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, - event, info->pid, seq, 0, 0, 0); + event, info->portid, seq, 0, 0, 0); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE, + rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); return; errout: @@@ -2693,12 -2680,14 +2698,12 @@@ static int rt6_info_route(struct rt6_in #else seq_puts(m, "00000000000000000000000000000000 00 "); #endif - rcu_read_lock(); n = rt->n; if (n) { seq_printf(m, "%pi6", n->primary_key); } else { seq_puts(m, "00000000000000000000000000000000"); } - rcu_read_unlock(); seq_printf(m, " %08x %08x %08x %08x %8s\n", rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), rt->dst.__use, rt->rt6i_flags, diff --combined net/l2tp/l2tp_netlink.c index 6ec3f67,6f93635..6c4cc12 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@@ -78,16 -78,16 +78,16 @@@ static int l2tp_nl_cmd_noop(struct sk_b goto out; }
- hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &l2tp_nl_family, 0, L2TP_CMD_NOOP); - if (IS_ERR(hdr)) { - ret = PTR_ERR(hdr); + if (!hdr) { + ret = -EMSGSIZE; goto err_out; }
genlmsg_end(msg, hdr);
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); + return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
err_out: nlmsg_free(msg); @@@ -235,7 -235,7 +235,7 @@@ out return ret; }
-static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, +static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags, struct l2tp_tunnel *tunnel) { void *hdr; @@@ -248,10 -248,10 +248,10 @@@ struct l2tp_stats stats; unsigned int start;
- hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, + hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE;
if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || @@@ -359,12 -359,12 +359,12 @@@ static int l2tp_nl_cmd_tunnel_get(struc goto out; }
- ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq, + ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, NLM_F_ACK, tunnel); if (ret < 0) goto err_out;
- return genlmsg_unicast(net, msg, info->snd_pid); + return genlmsg_unicast(net, msg, info->snd_portid);
err_out: nlmsg_free(msg); @@@ -384,7 -384,7 +384,7 @@@ static int l2tp_nl_cmd_tunnel_dump(stru if (tunnel == NULL) goto out;
- if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid, + if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, tunnel) <= 0) goto out; @@@ -604,7 -604,7 +604,7 @@@ out return ret; }
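[Editor's sketch] The l2tp hunks above replace IS_ERR(hdr) with !hdr because genlmsg_put() reports failure by returning NULL, while IS_ERR() only recognizes the small negative error-encoded pointer range; the old check let a NULL slip through to the nla_put calls. A self-contained demonstration with a simplified IS_ERR():

#include <stdio.h>

/* Simplified model of the kernel's IS_ERR()/ERR_PTR() convention:
 * error pointers occupy the last 4095 values of the address space. */
#define MAX_ERRNO 4095UL
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *failed_put = NULL;           /* what genlmsg_put() returns */
	void *err_ptr = (void *)-12L;      /* ERR_PTR(-ENOMEM) style */

	printf("IS_ERR(NULL)         = %d  <- old check misses this\n",
	       is_err(failed_put));
	printf("IS_ERR(ERR_PTR(-12)) = %d\n", is_err(err_ptr));
	printf("!NULL check          = %d  <- new check catches it\n",
	       !failed_put);
	return 0;
}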
-static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, +static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags, struct l2tp_session *session) { void *hdr; @@@ -616,9 -616,9 +616,9 @@@
sk = tunnel->sock;
- hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); + hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE;
if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || @@@ -705,12 -705,12 +705,12 @@@ static int l2tp_nl_cmd_session_get(stru goto out; }
- ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq, + ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session); if (ret < 0) goto err_out;
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); + return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
err_out: nlmsg_free(msg); @@@ -742,7 -742,7 +742,7 @@@ static int l2tp_nl_cmd_session_dump(str continue; }
- if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid, + if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, session) <= 0) break; diff --combined net/sched/sch_qfq.c index 2556620,211a212..f0dd83c --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@@ -865,7 -865,10 +865,10 @@@ static void qfq_update_start(struct qfq if (mask) { struct qfq_group *next = qfq_ffs(q, mask); if (qfq_gt(roundedF, next->F)) { - cl->S = next->F; + if (qfq_gt(limit, next->F)) + cl->S = next->F; + else /* preserve timestamp correctness */ + cl->S = limit; return; } } @@@ -878,7 -881,7 +881,7 @@@ static int qfq_enqueue(struct sk_buff * { struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; - int err; + int err = 0;
cl = qfq_classify(skb, sch, &err); if (cl == NULL) { diff --combined net/wireless/reg.c index 1ad04e5,72d170c..4de18ae --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@@ -350,6 -350,9 +350,9 @@@ static void reg_regdb_search(struct wor struct reg_regdb_search_request *request; const struct ieee80211_regdomain *curdom, *regdom; int i, r; + bool set_reg = false; + + mutex_lock(&cfg80211_mutex);
mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { @@@ -365,9 -368,7 +368,7 @@@ r = reg_copy_regd(®dom, curdom); if (r) break; - mutex_lock(&cfg80211_mutex); - set_regdom(regdom); - mutex_unlock(&cfg80211_mutex); + set_reg = true; break; } } @@@ -375,6 -376,11 +376,11 @@@ kfree(request); } mutex_unlock(®_regdb_search_mutex); + + if (set_reg) + set_regdom(regdom); + + mutex_unlock(&cfg80211_mutex); }
static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@@ -1949,7 -1955,8 +1955,7 @@@ static void restore_regulatory_settings if (reg_request->initiator != NL80211_REGDOM_SET_BY_USER) continue; - list_del(®_request->list); - list_add_tail(®_request->list, &tmp_reg_req_list); + list_move_tail(®_request->list, &tmp_reg_req_list); } } spin_unlock(®_requests_lock); @@@ -2008,7 -2015,8 +2014,7 @@@ "into the queue\n", reg_request->alpha2[0], reg_request->alpha2[1]); - list_del(®_request->list); - list_add_tail(®_request->list, ®_requests_list); + list_move_tail(®_request->list, ®_requests_list); } spin_unlock(®_requests_lock);
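[Editor's sketch] restore_regulatory_settings() above collapses each list_del() + list_add_tail() pair into list_move_tail(), which relinks the entry in one step. A stripped-down model of the list.h helpers showing the equivalence:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_entry(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Exactly a delete followed by an add-tail, as one helper. */
static void list_move_tail_entry(struct list_head *e, struct list_head *h)
{
	list_del_entry(e);
	list_add_tail_entry(e, h);
}

int main(void)
{
	struct list_head src, dst, node;
	list_init(&src);
	list_init(&dst);
	list_add_tail_entry(&node, &src);
	list_move_tail_entry(&node, &dst);
	printf("moved: %s\n", dst.next == &node ? "yes" : "no");
	return 0;
}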
diff --combined net/xfrm/xfrm_policy.c index 741a32a,387848e..f4e0a6a --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@@ -42,12 -42,13 +42,12 @@@ static DEFINE_SPINLOCK(xfrm_policy_sk_b static struct dst_entry *xfrm_policy_sk_bundles; static DEFINE_RWLOCK(xfrm_policy_lock);
-static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); -static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; +static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); +static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] + __read_mostly;
static struct kmem_cache *xfrm_dst_cache __read_mostly;
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); -static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); static void xfrm_init_pmtu(struct dst_entry *dst); static int stale_bundle(struct dst_entry *dst); static int xfrm_bundle_ok(struct xfrm_dst *xdst); @@@ -94,24 -95,6 +94,24 @@@ bool xfrm_selector_match(const struct x return false; }
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) +{ + struct xfrm_policy_afinfo *afinfo; + + if (unlikely(family >= NPROTO)) + return NULL; + rcu_read_lock(); + afinfo = rcu_dereference(xfrm_policy_afinfo[family]); + if (unlikely(!afinfo)) + rcu_read_unlock(); + return afinfo; +} + +static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) +{ + rcu_read_unlock(); +} + static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, const xfrm_address_t *saddr, const xfrm_address_t *daddr, @@@ -602,6 -585,7 +602,7 @@@ int xfrm_policy_insert(int dir, struct xfrm_pol_hold(policy); net->xfrm.policy_count[dir]++; atomic_inc(&flow_cache_genid); + rt_genid_bump(net); if (delpol) __xfrm_policy_unlink(delpol, dir); policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); @@@ -1780,7 -1764,7 +1781,7 @@@ static struct dst_entry *make_blackhole
if (!afinfo) { dst_release(dst_orig); - ret = ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); } else { ret = afinfo->blackhole_route(net, dst_orig); } @@@ -2437,7 -2421,7 +2438,7 @@@ int xfrm_policy_register_afinfo(struct return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; - write_lock_bh(&xfrm_policy_afinfo_lock); + spin_lock(&xfrm_policy_afinfo_lock); if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) err = -ENOBUFS; else { @@@ -2458,9 -2442,9 +2459,9 @@@ dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(afinfo->garbage_collect == NULL)) afinfo->garbage_collect = xfrm_garbage_collect_deferred; - xfrm_policy_afinfo[afinfo->family] = afinfo; + rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); } - write_unlock_bh(&xfrm_policy_afinfo_lock); + spin_unlock(&xfrm_policy_afinfo_lock);
rtnl_lock(); for_each_net(net) { @@@ -2493,26 -2477,21 +2494,26 @@@ int xfrm_policy_unregister_afinfo(struc return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; - write_lock_bh(&xfrm_policy_afinfo_lock); + spin_lock(&xfrm_policy_afinfo_lock); if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) err = -EINVAL; - else { - struct dst_ops *dst_ops = afinfo->dst_ops; - xfrm_policy_afinfo[afinfo->family] = NULL; - dst_ops->kmem_cachep = NULL; - dst_ops->check = NULL; - dst_ops->negative_advice = NULL; - dst_ops->link_failure = NULL; - afinfo->garbage_collect = NULL; - } + else + RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], + NULL); + } + spin_unlock(&xfrm_policy_afinfo_lock); + if (!err) { + struct dst_ops *dst_ops = afinfo->dst_ops; + + synchronize_rcu(); + + dst_ops->kmem_cachep = NULL; + dst_ops->check = NULL; + dst_ops->negative_advice = NULL; + dst_ops->link_failure = NULL; + afinfo->garbage_collect = NULL; } - write_unlock_bh(&xfrm_policy_afinfo_lock); return err; } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); @@@ -2521,16 -2500,33 +2522,16 @@@ static void __net_init xfrm_dst_ops_ini { struct xfrm_policy_afinfo *afinfo;
- read_lock_bh(&xfrm_policy_afinfo_lock); - afinfo = xfrm_policy_afinfo[AF_INET]; + rcu_read_lock(); + afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]); if (afinfo) net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; #if IS_ENABLED(CONFIG_IPV6) - afinfo = xfrm_policy_afinfo[AF_INET6]; + afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]); if (afinfo) net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; #endif - read_unlock_bh(&xfrm_policy_afinfo_lock); -} - -static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) -{ - struct xfrm_policy_afinfo *afinfo; - if (unlikely(family >= NPROTO)) - return NULL; - read_lock(&xfrm_policy_afinfo_lock); - afinfo = xfrm_policy_afinfo[family]; - if (unlikely(!afinfo)) - read_unlock(&xfrm_policy_afinfo_lock); - return afinfo; -} - -static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) -{ - read_unlock(&xfrm_policy_afinfo_lock); + rcu_read_unlock(); }
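[Editor's sketch] xfrm_policy_get_afinfo() now takes rcu_read_lock() and returns with it held on success (xfrm_policy_put_afinfo() is just the matching unlock), while registration publishes the pointer with rcu_assign_pointer() under a plain spinlock. Modeled here with C11 release/acquire atomics, which give the same publish/subscribe ordering; the unregister path additionally needs synchronize_rcu() before tearing down dst_ops, as the patch does, and a grace period has no one-line C11 equivalent:

#include <stdatomic.h>
#include <stdio.h>

struct afinfo { int family; };

/* kernel: xfrm_policy_afinfo[family], an __rcu pointer */
static _Atomic(struct afinfo *) afinfo_slot;

/* kernel: rcu_assign_pointer() under xfrm_policy_afinfo_lock */
static void afinfo_register(struct afinfo *a)
{
	atomic_store_explicit(&afinfo_slot, a, memory_order_release);
}

/* kernel: rcu_dereference() inside an rcu_read_lock() section */
static struct afinfo *afinfo_get(void)
{
	return atomic_load_explicit(&afinfo_slot, memory_order_acquire);
}

int main(void)
{
	static struct afinfo inet = { .family = 2 };
	afinfo_register(&inet);
	struct afinfo *a = afinfo_get();
	if (a)
		printf("got family %d\n", a->family);
	return 0;
}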
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) diff --combined net/xfrm/xfrm_user.c index 5d6eb4b,289f4bf..94a2a1f --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@@ -123,9 -123,21 +123,21 @@@ static inline int verify_replay(struct struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; + struct xfrm_replay_state_esn *rs;
- if ((p->flags & XFRM_STATE_ESN) && !rt) - return -EINVAL; + if (p->flags & XFRM_STATE_ESN) { + if (!rt) + return -EINVAL; + + rs = nla_data(rt); + + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) + return -EINVAL; + + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && + nla_len(rt) != sizeof(*rs)) + return -EINVAL; + }
if (!rt) return 0; @@@ -370,14 -382,15 +382,15 @@@ static inline int xfrm_replay_verify_le struct nlattr *rp) { struct xfrm_replay_state_esn *up; + int ulen;
if (!replay_esn || !rp) return 0;
up = nla_data(rp); + ulen = xfrm_replay_state_esn_len(up);
- if (xfrm_replay_state_esn_len(replay_esn) != - xfrm_replay_state_esn_len(up)) + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) return -EINVAL;
return 0; @@@ -388,22 -401,28 +401,28 @@@ static int xfrm_alloc_replay_state_esn( struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; + int klen, ulen;
if (!rta) return 0;
up = nla_data(rta); + klen = xfrm_replay_state_esn_len(up); + ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
- p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + p = kzalloc(klen, GFP_KERNEL); if (!p) return -ENOMEM;
- pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + pp = kzalloc(klen, GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; }
+ memcpy(p, up, ulen); + memcpy(pp, up, ulen); + *replay_esn = p; *preplay_esn = pp;
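[Editor's sketch] xfrm_alloc_replay_state_esn() above now sizes the kernel copy from the user-declared bmp_len (klen) but copies only what the attribute actually carried (ulen), zero-filling the rest — together with the verify_replay() bounds check this closes an out-of-bounds read when userspace lies about the length. The clamp-and-zero-fill pattern on its own:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Shape of the variable-length object, like xfrm_replay_state_esn. */
struct esn {
	unsigned int bmp_len;      /* declared number of bitmap words */
	unsigned int bmp[];        /* flexible array */
};

static size_t esn_len(unsigned int bmp_len)
{
	return sizeof(struct esn) + bmp_len * sizeof(unsigned int);
}

/* klen: size implied by the header; attr_len: bytes userspace sent. */
static struct esn *esn_dup(const struct esn *up, size_t attr_len)
{
	size_t klen = esn_len(up->bmp_len);
	size_t ulen = attr_len >= klen ? klen : sizeof(*up);
	struct esn *p = calloc(1, klen);   /* kernel: kzalloc(klen, ...) */

	if (p)
		memcpy(p, up, ulen);       /* never read past attr_len */
	return p;
}

int main(void)
{
	struct esn *u = calloc(1, esn_len(2));
	u->bmp_len = 2;                    /* claims two bitmap words... */
	/* ...but pretend the attribute only carried the header. */
	struct esn *k = esn_dup(u, sizeof(struct esn));
	printf("kernel copy bmp_len=%u bmp[0]=%u (zero-filled)\n",
	       k->bmp_len, k->bmp[0]);
	free(u);
	free(k);
	return 0;
}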
@@@ -442,10 -461,11 +461,11 @@@ static void copy_from_user_state(struc * somehow made shareable and move it to xfrm_state.c - JHS * */ - static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) + static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, + int update_esn) { struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; @@@ -555,7 -575,7 +575,7 @@@ static struct xfrm_state *xfrm_state_co goto error;
/* override default values from above */ - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 0);
return x;
@@@ -603,7 -623,7 +623,7 @@@ static int xfrm_add_sa(struct sk_buff * }
c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type;
km_state_notify(x, &c); @@@ -676,7 -696,7 +696,7 @@@ static int xfrm_del_sa(struct sk_buff * goto out;
c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type; km_state_notify(x, &c);
@@@ -689,6 -709,7 +709,7 @@@ out
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { + memset(p, 0, sizeof(*p)); memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->lft, &x->lft, sizeof(p->lft)); @@@ -742,7 -763,7 +763,7 @@@ static int copy_to_user_auth(struct xfr return -EMSGSIZE;
algo = nla_data(nla); - strcpy(algo->alg_name, auth->alg_name); + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len;
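[Editor's sketch] copy_to_user_state(), copy_to_user_policy(), copy_to_user_tmpl(), and copy_to_user_auth() in these hunks all gain a memset() (or a bounded strncpy) before filling the destination, because structs copied to userspace over netlink otherwise leak uninitialized padding bytes. The defensive pattern by itself, with hypothetical field names; this sketch bounds the copy at size-1 for guaranteed NUL termination:

#include <stdio.h>
#include <string.h>

struct wire_algo {
	char alg_name[64];
	unsigned int alg_key_len;
	/* padding after this field would leak without the memset */
};

static void fill_for_user(struct wire_algo *p, const char *name,
			  unsigned int key_len)
{
	memset(p, 0, sizeof(*p));                 /* no stale bytes escape */
	strncpy(p->alg_name, name, sizeof(p->alg_name) - 1);
	p->alg_key_len = key_len;
}

int main(void)
{
	struct wire_algo a;
	fill_for_user(&a, "hmac(sha1)", 160);
	printf("%s / %u bits\n", a.alg_name, a.alg_key_len);
	return 0;
}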
@@@ -826,7 -847,7 +847,7 @@@ static int dump_one_state(struct xfrm_s struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; @@@ -878,6 -899,7 +899,7 @@@ static struct sk_buff *xfrm_state_netli { struct xfrm_dump_info info; struct sk_buff *skb; + int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) @@@ -888,9 -910,10 +910,10 @@@ info.nlmsg_seq = seq; info.nlmsg_flags = 0;
- if (dump_one_state(x, 0, &info)) { + err = dump_one_state(x, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); }
return skb; @@@ -904,7 -927,7 +927,7 @@@ static inline size_t xfrm_spdinfo_msgsi }
static int build_spdinfo(struct sk_buff *skb, struct net *net, - u32 pid, u32 seq, u32 flags) + u32 portid, u32 seq, u32 flags) { struct xfrmk_spdinfo si; struct xfrmu_spdinfo spc; @@@ -913,7 -936,7 +936,7 @@@ int err; u32 *f;
- nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); + nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... */ return -EMSGSIZE;
@@@ -946,17 -969,17 +969,17 @@@ static int xfrm_get_spdinfo(struct sk_b struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); - u32 spid = NETLINK_CB(skb).pid; + u32 sportid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM;
- if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) + if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0) BUG();
- return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); + return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid); }
static inline size_t xfrm_sadinfo_msgsize(void) @@@ -967,7 -990,7 +990,7 @@@ }
static int build_sadinfo(struct sk_buff *skb, struct net *net, - u32 pid, u32 seq, u32 flags) + u32 portid, u32 seq, u32 flags) { struct xfrmk_sadinfo si; struct xfrmu_sadhinfo sh; @@@ -975,7 -998,7 +998,7 @@@ int err; u32 *f;
- nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); + nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... */ return -EMSGSIZE;
@@@ -1003,17 -1026,17 +1026,17 @@@ static int xfrm_get_sadinfo(struct sk_b struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); - u32 spid = NETLINK_CB(skb).pid; + u32 sportid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM;
- if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) + if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0) BUG();
- return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); + return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid); }
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, @@@ -1033,7 -1056,7 +1056,7 @@@ if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { - err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); + err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid); } xfrm_state_put(x); out_noput: @@@ -1114,7 -1137,7 +1137,7 @@@ static int xfrm_alloc_userspi(struct sk goto out; }
- err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); + err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out: xfrm_state_put(x); @@@ -1317,6 -1340,7 +1340,7 @@@ static void copy_from_user_policy(struc
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) { + memset(p, 0, sizeof(*p)); memcpy(&p->sel, &xp->selector, sizeof(p->sel)); memcpy(&p->lft, &xp->lft, sizeof(p->lft)); memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); @@@ -1401,7 -1425,7 +1425,7 @@@ static int xfrm_add_policy(struct sk_bu
c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c);
xfrm_pol_put(xp); @@@ -1421,6 -1445,7 +1445,7 @@@ static int copy_to_user_tmpl(struct xfr struct xfrm_user_tmpl *up = &vec[i]; struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+ memset(up, 0, sizeof(*up)); memcpy(&up->id, &kp->id, sizeof(up->id)); up->family = kp->encap_family; memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); @@@ -1486,7 -1511,7 +1511,7 @@@ static int dump_one_policy(struct xfrm_ struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; @@@ -1546,6 -1571,7 +1571,7 @@@ static struct sk_buff *xfrm_policy_netl { struct xfrm_dump_info info; struct sk_buff *skb; + int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) @@@ -1556,9 -1582,10 +1582,10 @@@ info.nlmsg_seq = seq; info.nlmsg_flags = 0;
- if (dump_one_policy(xp, dir, 0, &info) < 0) { + err = dump_one_policy(xp, dir, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); }
return skb; @@@ -1621,7 -1648,7 +1648,7 @@@ static int xfrm_get_policy(struct sk_bu err = PTR_ERR(resp_skb); } else { err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, - NETLINK_CB(skb).pid); + NETLINK_CB(skb).portid); } } else { uid_t loginuid = audit_get_loginuid(current); @@@ -1638,7 -1665,7 +1665,7 @@@ c.data.byid = p->index; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c); }
@@@ -1668,7 -1695,7 +1695,7 @@@ static int xfrm_flush_sa(struct sk_buf c.data.proto = p->proto; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.net = net; km_state_notify(NULL, &c);
@@@ -1695,7 -1722,7 +1722,7 @@@ static int build_aevent(struct sk_buff struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); + nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); if (nlh == NULL) return -EMSGSIZE;
@@@ -1777,11 -1804,11 +1804,11 @@@ static int xfrm_get_ae(struct sk_buff * spin_lock_bh(&x->lock); c.data.aevent = p->flags; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid;
if (build_aevent(r_skb, x, &c) < 0) BUG(); - err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid); + err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid); spin_unlock_bh(&x->lock); xfrm_state_put(x); return err; @@@ -1822,12 -1849,12 +1849,12 @@@ static int xfrm_new_ae(struct sk_buff * goto out;
spin_lock_bh(&x->lock); - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 1); spin_unlock_bh(&x->lock);
c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.data.aevent = XFRM_AE_CU; km_state_notify(x, &c); err = 0; @@@ -1862,7 -1889,7 +1889,7 @@@ static int xfrm_flush_policy(struct sk_ c.data.type = type; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.net = net; km_policy_notify(NULL, 0, &c); return 0; @@@ -1930,7 -1957,7 +1957,7 @@@ static int xfrm_add_pol_expire(struct s // reset the timers here? WARN(1, "Dont know what to do with soft policy expire\n"); } - km_policy_expired(xp, p->dir, up->hard, current->pid); + km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
out: xfrm_pol_put(xp); @@@ -1958,7 -1985,7 +1985,7 @@@ static int xfrm_add_sa_expire(struct sk err = -EINVAL; if (x->km.state != XFRM_STATE_VALID) goto out; - km_state_expired(x, ue->hard, current->pid); + km_state_expired(x, ue->hard, nlh->nlmsg_pid);
if (ue->hard) { uid_t loginuid = audit_get_loginuid(current); @@@ -2370,7 -2397,7 +2397,7 @@@ static int build_expire(struct sk_buff struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); + nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); if (nlh == NULL) return -EMSGSIZE;
@@@ -2429,7 -2456,7 +2456,7 @@@ static int xfrm_notify_sa_flush(const s if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); + nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); if (nlh == NULL) { kfree_skb(skb); return -EMSGSIZE; @@@ -2497,7 -2524,7 +2524,7 @@@ static int xfrm_notify_sa(struct xfrm_s if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); + nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; @@@ -2567,7 -2594,8 +2594,7 @@@ static inline size_t xfrm_acquire_msgsi }
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, - struct xfrm_tmpl *xt, struct xfrm_policy *xp, - int dir) + struct xfrm_tmpl *xt, struct xfrm_policy *xp) { __u32 seq = xfrm_get_acqseq(); struct xfrm_user_acquire *ua; @@@ -2582,7 -2610,7 +2609,7 @@@ memcpy(&ua->id, &x->id, sizeof(ua->id)); memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); - copy_to_user_policy(xp, &ua->policy, dir); + copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT); ua->aalgos = xt->aalgos; ua->ealgos = xt->ealgos; ua->calgos = xt->calgos; @@@ -2604,7 -2632,7 +2631,7 @@@ }
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, - struct xfrm_policy *xp, int dir) + struct xfrm_policy *xp) { struct net *net = xs_net(x); struct sk_buff *skb; @@@ -2613,7 -2641,7 +2640,7 @@@ if (skb == NULL) return -ENOMEM;
- if (build_acquire(skb, x, xt, xp, dir) < 0) + if (build_acquire(skb, x, xt, xp) < 0) BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); @@@ -2696,7 -2724,7 +2723,7 @@@ static int build_polexpire(struct sk_bu struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); + nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); if (nlh == NULL) return -EMSGSIZE;
@@@ -2756,7 -2784,7 +2783,7 @@@ static int xfrm_notify_policy(struct xf if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); + nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; @@@ -2810,7 -2838,7 +2837,7 @@@ static int xfrm_notify_policy_flush(con if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); + nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; @@@ -2963,7 -2991,7 +2990,7 @@@ static int __net_init xfrm_user_net_ini .input = xfrm_netlink_rcv, };
- nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg); + nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg); if (nlsk == NULL) return -ENOMEM; net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */