The following commit has been merged in the master branch:
commit ccc214588d25b4ebc3f1b8e01c134238dd2a15fd
Merge: d26e2986c9bba1747e2422a7497b86a0127906f2 842b08bbee448b2069aaebb7f18b40942ad2a1bd
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Sep 26 11:46:19 2012 +1000
Merge remote-tracking branch 'net-next/master'
Conflicts:
	drivers/net/team/team.c
	drivers/net/usb/qmi_wwan.c
	net/batman-adv/bat_iv_ogm.c
	net/ipv4/fib_frontend.c
	net/ipv4/route.c
	net/socket.c
diff --combined Documentation/feature-removal-schedule.txt
index 80b068d,e45fa5c..bc777bc
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@@ -6,15 -6,6 +6,15 @@@ be removed from this file. The suggest
---------------------------
+What:	support for i.mx25 in mx2_camera.c
+When:	v3.8
+Why:	it's been broken for a year. Furthermore, i.MX25 video capture
+	HW doesn't have much in common with i.MX27. A separate driver
+	will be needed for it.
+Who:	Javier Martin <javier.martin@vista-silicon.com>
+
+---------------------------
+
 What:	ddebug_query="query" boot cmdline param
 When:	v3.8
 Why:	obsoleted by dyndbg="query" and module.dyndbg="query"
@@@ -262,6 -253,38 +262,6 @@@ Who:	Dave Jones <davej@redhat.com>, Mat
-----------------------------
-What:	fakephp and associated sysfs files in /sys/bus/pci/slots/
-When:	2011
-Why:	In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
-	represent a machine's physical PCI slots. The change in semantics
-	had userspace implications, as the hotplug core no longer allowed
-	drivers to create multiple sysfs files per physical slot (required
-	for multi-function devices, e.g.). fakephp was seen as a developer's
-	tool only, and its interface changed. Too late, we learned that
-	there were some users of the fakephp interface.
-
-	In 2.6.30, the original fakephp interface was restored. At the same
-	time, the PCI core gained the ability that fakephp provided, namely
-	function-level hot-remove and hot-add.
-
-	Since the PCI core now provides the same functionality, exposed in:
-
-		/sys/bus/pci/rescan
-		/sys/bus/pci/devices/.../remove
-		/sys/bus/pci/devices/.../rescan
-
-	there is no functional reason to maintain fakephp as well.
-
-	We will keep the existing module so that 'modprobe fakephp' will
-	present the old /sys/bus/pci/slots/... interface for compatibility,
-	but users are urged to migrate their applications to the API above.
-
-	After a reasonable transition period, we will remove the legacy
-	fakephp interface.
-Who:	Alex Chiang <achiang@hp.com>
-
----------------------------
-
 What:	CONFIG_RFKILL_INPUT
 When:	2.6.33
 Why:	Should be implemented in userspace, policy daemon.
@@@ -330,14 -353,6 +330,6 @@@ Why:	Internal alias support has been pr
 Who:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
- ---------------------------
-
- What:	xt_NOTRACK
- Files:	net/netfilter/xt_NOTRACK.c
- When:	April 2011
- Why:	Superseded by xt_CT
- Who:	Netfilter developer team <netfilter-devel@vger.kernel.org>
-
 ----------------------------
 What:	IRQF_DISABLED
@@@ -366,6 -381,16 +358,6 @@@ Who: Wey-Yi Guy <wey-yi.w.guy@intel.com
----------------------------
-What:	Legacy, non-standard chassis intrusion detection interface.
-When:	June 2011
-Why:	The adm9240, w83792d and w83793 hardware monitoring drivers have
-	legacy interfaces for chassis intrusion detection. A standard
-	interface has been added to each driver, so the legacy interface
-	can be removed.
-Who:	Jean Delvare <khali@linux-fr.org>
-
-----------------------------
-
 What:	i2c_driver.attach_adapter
 	i2c_driver.detach_adapter
 When:	September 2011
diff --combined arch/powerpc/configs/ppc64_defconfig
index e263e6a,06b5624..3dd7ac5
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@@ -51,7 -51,6 +51,7 @@@ CONFIG_KEXEC=
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_SCHED_SMT=y
+CONFIG_PPC_DENORMALISATION=y
 CONFIG_PCCARD=y
 CONFIG_ELECTRA_CF=y
 CONFIG_HOTPLUG_PCI=m
@@@ -93,7 -92,6 +93,6 @@@ CONFIG_NETFILTER_XT_TARGET_DSCP=
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
- CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
diff --combined drivers/infiniband/hw/cxgb4/qp.c
index e2bf9c6,5213bab..05bfe53
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@@ -137,25 -137,19 +137,25 @@@ static int create_qp(struct c4iw_rdev *
 	return -ENOMEM;
 	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
-	if (!wq->rq.qid)
-		goto err1;
+	if (!wq->rq.qid) {
+		ret = -ENOMEM;
+		goto free_sq_qid;
+	}
 	if (!user) {
 		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
 				       GFP_KERNEL);
-		if (!wq->sq.sw_sq)
-			goto err2;
+		if (!wq->sq.sw_sq) {
+			ret = -ENOMEM;
+			goto free_rq_qid;
+		}
 		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
 				       GFP_KERNEL);
-		if (!wq->rq.sw_rq)
-			goto err3;
+		if (!wq->rq.sw_rq) {
+			ret = -ENOMEM;
+			goto free_sw_sq;
+		}
 	}
 	/*
@@@ -163,23 -157,15 +163,23 @@@
 	 */
 	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
 	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
-	if (!wq->rq.rqt_hwaddr)
-		goto err4;
+	if (!wq->rq.rqt_hwaddr) {
+		ret = -ENOMEM;
+		goto free_sw_rq;
+	}
 	if (user) {
-		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-			goto err5;
+		ret = alloc_oc_sq(rdev, &wq->sq);
+		if (ret)
+			goto free_hwaddr;
+
+		ret = alloc_host_sq(rdev, &wq->sq);
+		if (ret)
+			goto free_sq;
 	} else
-		if (alloc_host_sq(rdev, &wq->sq))
-			goto err5;
+		ret = alloc_host_sq(rdev, &wq->sq);
+		if (ret)
+			goto free_hwaddr;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
@@@ -187,7 -173,7 +187,7 @@@
 					  wq->rq.memsize, &(wq->rq.dma_addr),
 					  GFP_KERNEL);
 	if (!wq->rq.queue)
-		goto err6;
+		goto free_sq;
 	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
 		__func__, wq->sq.queue,
 		(unsigned long long)virt_to_phys(wq->sq.queue),
@@@ -215,7 -201,7 +215,7 @@@
 	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
-		goto err7;
+		goto free_dma;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@@ -280,33 -266,33 +280,33 @@@
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret)
-		goto err7;
+		goto free_dma;
 	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
 	if (ret)
-		goto err7;
+		goto free_dma;
PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n", __func__, wq->sq.qid, wq->rq.qid, wq->db, (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
 	return 0;
-err7:
+free_dma:
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  wq->rq.memsize, wq->rq.queue,
 			  dma_unmap_addr(&wq->rq, mapping));
-err6:
+free_sq:
 	dealloc_sq(rdev, &wq->sq);
-err5:
+free_hwaddr:
 	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
-err4:
+free_sw_rq:
 	kfree(wq->rq.sw_rq);
-err3:
+free_sw_sq:
 	kfree(wq->sq.sw_sq);
-err2:
+free_rq_qid:
 	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
-err1:
+free_sq_qid:
 	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
-	return -ENOMEM;
+	return ret;
 }
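[ The error paths above all converge on a goto-unwind ladder whose labels
  now name the resource they release, and every failure sets a real errno
  in 'ret'. For readers unfamiliar with the idiom, a minimal stand-alone
  sketch of the same pattern in plain C, with invented names (not kernel
  code):

	#include <stdlib.h>

	struct ctx { void *a, *b; };

	/* Each label releases exactly what was acquired before the
	 * failing step, in reverse order of acquisition, and 'ret'
	 * carries the real error code back to the caller. */
	static int ctx_setup(struct ctx *c)
	{
		int ret;

		c->a = malloc(64);
		if (!c->a)
			return -1;	/* nothing acquired yet */

		c->b = malloc(64);
		if (!c->b) {
			ret = -1;
			goto free_a;	/* only 'a' needs unwinding */
		}

		return 0;

	free_a:
		free(c->a);
		return ret;
	} ]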
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, @@@ -1169,7 -1155,7 +1169,7 @@@ static int ring_kernel_db(struct c4iw_q */ if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) < (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) { - writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db); + writel(QID(qid) | PIDX(inc), qhp->wq.db); break; } set_current_state(TASK_UNINTERRUPTIBLE); diff --combined drivers/infiniband/ulp/ipoib/ipoib.h index 0af216d,381f51b..ac48f86 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@@ -104,6 -104,10 +104,10 @@@ enum
MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, + + IPOIB_NON_CHILD = 0, + IPOIB_LEGACY_CHILD = 1, + IPOIB_RTNL_CHILD = 2, };
#define IPOIB_OP_RECV (1ul << 31) @@@ -262,10 -266,7 +266,10 @@@ struct ipoib_ethtool_st u16 max_coalesced_frames; };
+struct ipoib_neigh_table; + struct ipoib_neigh_hash { + struct ipoib_neigh_table *ntbl; struct ipoib_neigh __rcu **buckets; struct rcu_head rcu; u32 mask; @@@ -274,9 -275,9 +278,9 @@@
struct ipoib_neigh_table { struct ipoib_neigh_hash __rcu *htbl; - rwlock_t rwlock; atomic_t entries; struct completion flushed; + struct completion deleted; };
/* @@@ -353,6 -354,7 +357,7 @@@ struct ipoib_dev_priv struct net_device *parent; struct list_head child_intfs; struct list_head list; + int child_type;
#ifdef CONFIG_INFINIBAND_IPOIB_CM struct ipoib_cm_dev_priv cm; @@@ -512,6 -514,14 +517,14 @@@ void ipoib_event(struct ib_event_handle int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
+ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, + u16 pkey, int child_type); + + int __init ipoib_netlink_init(void); + void __exit ipoib_netlink_fini(void); + + void ipoib_setup(struct net_device *dev); + void ipoib_pkey_poll(struct work_struct *work); int ipoib_pkey_dev_delay_open(struct net_device *dev); void ipoib_drain_cq(struct net_device *dev); diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c index ea0dfc7,b3e9709..b229df1 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@@ -150,7 -150,7 +150,7 @@@ static int ipoib_stop(struct net_devic
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev, 0); + ipoib_ib_dev_down(dev, 1); ipoib_ib_dev_stop(dev, 0);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { @@@ -173,6 -173,11 +173,11 @@@ return 0; }
+ static void ipoib_uninit(struct net_device *dev) + { + ipoib_dev_cleanup(dev); + } + static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@@ -546,15 -551,15 +551,15 @@@ static void neigh_add_path(struct sk_bu struct ipoib_neigh *neigh; unsigned long flags;
+ spin_lock_irqsave(&priv->lock, flags); neigh = ipoib_neigh_alloc(daddr, dev); if (!neigh) { + spin_unlock_irqrestore(&priv->lock, flags); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); return; }
- spin_lock_irqsave(&priv->lock, flags); - path = __path_find(dev, daddr + 4); if (!path) { path = path_rec_create(dev, daddr + 4); @@@ -863,10 -868,10 +868,10 @@@ static void __ipoib_reap_neigh(struct i if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return;
- write_lock_bh(&ntbl->rwlock); + spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock));
if (!htbl) goto out_unlock; @@@ -883,14 -888,16 +888,14 @@@ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from path/mc list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } else { np = &neigh->hnext; @@@ -900,7 -907,7 +905,7 @@@ }
out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); }
static void ipoib_reap_neigh(struct work_struct *work) @@@ -945,8 -952,10 +950,8 @@@ struct ipoib_neigh *ipoib_neigh_alloc(u struct ipoib_neigh *neigh; u32 hash_val;
- write_lock_bh(&ntbl->rwlock); - htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) { neigh = NULL; goto out_unlock; @@@ -957,10 -966,10 +962,10 @@@ */ hash_val = ipoib_addr_hash(htbl, daddr); for (neigh = rcu_dereference_protected(htbl->buckets[hash_val], - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); neigh != NULL; neigh = rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))) { + lockdep_is_held(&priv->lock))) { if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { /* found, take one ref on behalf of the caller */ if (!atomic_inc_not_zero(&neigh->refcnt)) { @@@ -983,11 -992,12 +988,11 @@@ /* put in hash */ rcu_assign_pointer(neigh->hnext, rcu_dereference_protected(htbl->buckets[hash_val], - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); rcu_assign_pointer(htbl->buckets[hash_val], neigh); atomic_inc(&ntbl->entries);
out_unlock: - write_unlock_bh(&ntbl->rwlock);
return neigh; } @@@ -1035,29 -1045,35 +1040,29 @@@ void ipoib_neigh_free(struct ipoib_neig struct ipoib_neigh *n; u32 hash_val;
- write_lock_bh(&ntbl->rwlock); - htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) - goto out_unlock; + return;
hash_val = ipoib_addr_hash(htbl, neigh->daddr); np = &htbl->buckets[hash_val]; for (n = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); n != NULL; n = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) { + lockdep_is_held(&priv->lock))) { if (n == neigh) { /* found */ rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); - goto out_unlock; + return; } else { np = &n->hnext; } } - -out_unlock: - write_unlock_bh(&ntbl->rwlock); - }
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) @@@ -1069,6 -1085,7 +1074,6 @@@
clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); ntbl->htbl = NULL; - rwlock_init(&ntbl->rwlock); htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); if (!htbl) return -ENOMEM; @@@ -1083,7 -1100,6 +1088,7 @@@ htbl->mask = (size - 1); htbl->buckets = buckets; ntbl->htbl = htbl; + htbl->ntbl = ntbl; atomic_set(&ntbl->entries, 0);
/* start garbage collection */ @@@ -1100,11 -1116,9 +1105,11 @@@ static void neigh_hash_free_rcu(struct struct ipoib_neigh_hash, rcu); struct ipoib_neigh __rcu **buckets = htbl->buckets; + struct ipoib_neigh_table *ntbl = htbl->ntbl;
kfree(buckets); kfree(htbl); + complete(&ntbl->deleted); }
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) @@@ -1116,10 -1130,10 +1121,10 @@@ int i;
/* remove all neigh connected to a given path or mcast */ - write_lock_bh(&ntbl->rwlock); + spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock));
if (!htbl) goto out_unlock; @@@ -1129,14 -1143,16 +1134,14 @@@ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { /* delete neighs belong to this parent */ if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from parent list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } else { np = &neigh->hnext; @@@ -1145,7 -1161,7 +1150,7 @@@ } } out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); }
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) @@@ -1153,44 -1169,37 +1158,44 @@@ struct ipoib_neigh_table *ntbl = &priv->ntbl; struct ipoib_neigh_hash *htbl; unsigned long flags; - int i; + int i, wait_flushed = 0;
- write_lock_bh(&ntbl->rwlock); + init_completion(&priv->ntbl.flushed); + + spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) goto out_unlock;
+ wait_flushed = atomic_read(&priv->ntbl.entries); + if (!wait_flushed) + goto free_htbl; + for (i = 0; i < htbl->size; i++) { struct ipoib_neigh *neigh; struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from path/mc list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } }
+free_htbl: rcu_assign_pointer(ntbl->htbl, NULL); call_rcu(&htbl->rcu, neigh_hash_free_rcu);
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	if (wait_flushed)
+		wait_for_completion(&priv->ntbl.flushed);
 }
static void ipoib_neigh_hash_uninit(struct net_device *dev) @@@ -1199,7 -1208,7 +1204,7 @@@ int stopped;
 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
-	init_completion(&priv->ntbl.flushed);
+	init_completion(&priv->ntbl.deleted);
 	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
/* Stop GC if called at init fail need to cancel work */ @@@ -1207,9 -1216,10 +1212,9 @@@ if (!stopped) cancel_delayed_work(&priv->neigh_reap_task);
-	if (atomic_read(&priv->ntbl.entries)) {
-		ipoib_flush_neighs(priv);
-		wait_for_completion(&priv->ntbl.flushed);
-	}
+	ipoib_flush_neighs(priv);
+
+	wait_for_completion(&priv->ntbl.deleted);
 }
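[ The neighbour-table conversion above frees the RCU-protected hash table
  from a call_rcu() callback and signals completions ('flushed' once the
  entries are reclaimed, 'deleted' once the table itself is freed) so the
  teardown path can sleep until the grace period has really elapsed. A
  minimal, hypothetical sketch of that completion-signalled call_rcu()
  pattern (simplified, not the ipoib code):

	#include <linux/completion.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct table {
		struct rcu_head rcu;
		struct completion *deleted;
	};

	/* RCU callback: runs after the grace period, frees the table,
	 * then wakes whoever is blocked in table_destroy(). */
	static void table_free_rcu(struct rcu_head *head)
	{
		struct table *t = container_of(head, struct table, rcu);
		struct completion *done = t->deleted;

		kfree(t);
		complete(done);	/* wake the waiter after the free */
	}

	static void table_destroy(struct table *t, struct completion *done)
	{
		init_completion(done);
		t->deleted = done;
		call_rcu(&t->rcu, table_free_rcu);
		wait_for_completion(done);	/* table_free_rcu has run */
	} ]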
@@@ -1257,6 -1267,9 +1262,9 @@@ out void ipoib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; + LIST_HEAD(head); + + ASSERT_RTNL();
ipoib_delete_debug_files(dev);
@@@ -1265,10 -1278,9 +1273,9 @@@ /* Stop GC on child */ set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); cancel_delayed_work(&cpriv->neigh_reap_task); - unregister_netdev(cpriv->dev); - ipoib_dev_cleanup(cpriv->dev); - free_netdev(cpriv->dev); + unregister_netdevice_queue(cpriv->dev, &head); } + unregister_netdevice_many(&head);
ipoib_ib_dev_cleanup(dev);
@@@ -1286,6 -1298,7 +1293,7 @@@ static const struct header_ops ipoib_he };
static const struct net_device_ops ipoib_netdev_ops = { + .ndo_uninit = ipoib_uninit, .ndo_open = ipoib_open, .ndo_stop = ipoib_stop, .ndo_change_mtu = ipoib_change_mtu, @@@ -1295,7 -1308,7 +1303,7 @@@ .ndo_set_rx_mode = ipoib_set_mcast_list, };
- static void ipoib_setup(struct net_device *dev) + void ipoib_setup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev);
@@@ -1657,7 -1670,6 +1665,6 @@@ static void ipoib_remove_one(struct ib_ flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev); - ipoib_dev_cleanup(priv->dev); free_netdev(priv->dev); }
@@@ -1709,8 -1721,15 +1716,15 @@@ static int __init ipoib_init_module(voi if (ret) goto err_sa;
+ ret = ipoib_netlink_init(); + if (ret) + goto err_client; + return 0;
+ err_client: + ib_unregister_client(&ipoib_client); + err_sa: ib_sa_unregister_client(&ipoib_sa_client); destroy_workqueue(ipoib_workqueue); @@@ -1723,6 -1742,7 +1737,7 @@@ err_fs
 static void __exit ipoib_cleanup_module(void)
 {
+	ipoib_netlink_fini();
 	ib_unregister_client(&ipoib_client);
 	ib_sa_unregister_client(&ipoib_sa_client);
 	ipoib_unregister_debugfs();
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e97a7,ca80487..f67e700
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@@ -662,16 -662,14 +662,16 @@@ void bnx2x_csum_validate(struct sk_buf
 				 struct bnx2x_fastpath *fp,
 				 struct bnx2x_eth_q_stats *qstats)
 {
-	/* Do nothing if no IP/L4 csum validation was done */
-
+	/* Do nothing if no L4 csum validation was done.
+	 * We do not check whether IP csum was validated. For IPv4 we assume
+	 * that if the card got as far as validating the L4 csum, it also
+	 * validated the IP csum. IPv6 has no IP csum.
+	 */
 	if (cqe->fast_path_cqe.status_flags &
-	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
-	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 		return;
-	/* If both IP/L4 validation were done, check if an error was found. */
+	/* If L4 validation was done, check if an error was found. */
 	if (cqe->fast_path_cqe.type_error_flags &
 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
@@@ -2285,7 -2283,7 +2285,7 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
 	/* Wait for all pending SP commands to complete */
 	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
-		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 		return -EBUSY;
 	}
@@@ -2333,7 -2331,7 +2333,7 @@@ load_error0 }
/* must be called with rtnl_lock */ - int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) + int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) { int i; bool global = false; @@@ -2395,7 -2393,7 +2395,7 @@@
/* Cleanup the chip if needed */ if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); + bnx2x_chip_cleanup(bp, unload_mode, keep_link); else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); @@@ -2419,7 -2417,7 +2419,7 @@@ bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, false); }
/* @@@ -3770,7 -3768,7 +3770,7 @@@ int bnx2x_reload_if_running(struct net_ if (unlikely(!netif_running(dev))) return 0;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); return bnx2x_nic_load(bp, LOAD_NORMAL); }
@@@ -3967,7 -3965,7 +3967,7 @@@ int bnx2x_suspend(struct pci_dev *pdev
netif_device_detach(dev);
- bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e11485c,7a91570..f7ed122 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -1162,9 -1162,14 +1162,9 @@@ static int bnx2x_send_final_clnup(struc
static u8 bnx2x_is_pcie_pending(struct pci_dev *dev) { - int pos; u16 status;
- pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); return status & PCI_EXP_DEVSTA_TRPND; }
@@@ -2166,7 -2171,6 +2166,6 @@@ void bnx2x_link_set(struct bnx2x *bp { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp);
@@@ -2179,12 -2183,19 +2178,19 @@@ static void bnx2x__link_reset(struct bn { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not reset link\n"); }
+ void bnx2x_force_link_reset(struct bnx2x *bp) + { + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); + } + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) { u8 rc = 0; @@@ -6130,7 -6141,8 +6136,7 @@@ static void bnx2x_init_pxp(struct bnx2 u16 devctl; int r_order, w_order;
- pci_read_config_word(bp->pdev, - pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); + pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); if (bp->mrrs == -1) @@@ -6751,7 -6763,6 +6757,6 @@@ static int bnx2x_init_hw_port(struct bn u32 low, high; u32 val;
- bnx2x__link_reset(bp);
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@@ -8244,12 -8255,15 +8249,15 @@@ u32 bnx2x_send_unload_req(struct bnx2x
  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
  *
  * @bp:		driver handle
+ * @keep_link:	true iff link should be kept up
  */
- void bnx2x_send_unload_done(struct bnx2x *bp)
+ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
 {
+	u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
 	/* Report UNLOAD_DONE to MCP */
 	if (!BP_NOMCP(bp))
-		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
 }
static int bnx2x_func_wait_started(struct bnx2x *bp) @@@ -8318,7 -8332,7 +8326,7 @@@ return 0; }
- void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) + void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); int i, rc = 0; @@@ -8440,7 -8454,7 +8448,7 @@@ unload_error
/* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, keep_link); }
void bnx2x_disable_close_the_gate(struct bnx2x *bp) @@@ -8852,7 -8866,8 +8860,8 @@@ int bnx2x_leader_reset(struct bnx2x *bp * driver is owner of the HW */ if (!global && !BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EAGAIN; @@@ -8958,7 -8973,7 +8967,7 @@@ static void bnx2x_parity_recover(struc
/* Stop the driver */ /* If interface has been removed - break */ - if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) return;
bp->recovery_state = BNX2X_RECOVERY_WAIT; @@@ -9124,7 -9139,7 +9133,7 @@@ static void bnx2x_sp_rtnl_task(struct w bp->sp_rtnl_state = 0; smp_mb();
- bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL);
goto sp_rtnl_exit; @@@ -9310,7 -9325,8 +9319,8 @@@ static void __devinit bnx2x_prev_unload
static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) { - u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BNX2X_ERR("MCP response failure, aborting\n"); return -EBUSY; @@@ -9374,7 -9390,7 +9384,7 @@@ static int __devinit bnx2x_prev_mark_pa
static int __devinit bnx2x_do_flr(struct bnx2x *bp) { - int i, pos; + int i; u16 status; struct pci_dev *dev = bp->pdev;
@@@ -9391,12 -9407,16 +9401,12 @@@ return -EINVAL; }
- pos = pci_pcie_cap(dev); - if (!pos) - return -ENOTTY; - /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) msleep((1 << (i - 1)) * 100);
- pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); if (!(status & PCI_EXP_DEVSTA_TRPND)) goto clear; } @@@ -9821,13 -9841,12 +9831,13 @@@ static void __devinit bnx2x_get_igu_cam }
 #ifdef CONFIG_PCI_MSI
-	/*
-	 * It's expected that number of CAM entries for this functions is equal
-	 * to the number evaluated based on the MSI-X table size. We want a
-	 * harsh warning if these values are different!
+	/* Due to new PF resource allocation by MFW T7.4 and above, it's
+	 * optional that number of CAM entries will not be equal to the value
+	 * advertised in PCI.
+	 * Driver should use the minimal value of both as the actual status
+	 * block count
 	 */
-	WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+	bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
 #endif
if (igu_sb_cnt == 0) @@@ -11000,7 -11019,7 +11010,7 @@@ static int bnx2x_close(struct net_devic struct bnx2x *bp = netdev_priv(dev);
/* Unload the driver, release IRQs */ - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
/* Power off */ bnx2x_set_power_state(bp, PCI_D3hot); @@@ -12157,7 -12176,7 +12167,7 @@@ static void bnx2x_io_resume(struct pci_ rtnl_unlock(); }
-static struct pci_error_handlers bnx2x_err_handler = { +static const struct pci_error_handlers bnx2x_err_handler = { .error_detected = bnx2x_io_error_detected, .slot_reset = bnx2x_io_slot_reset, .resume = bnx2x_io_resume, diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 9339854,34d510d..b6c742c --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -2470,8 -2470,8 +2470,8 @@@ int cxgb4_sync_txq_pidx(struct net_devi else delta = size - hw_pidx + pidx; wmb(); - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(qid) | V_PIDX(delta)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(qid) | PIDX(delta)); } out: return ret; @@@ -2579,8 -2579,8 +2579,8 @@@ static void sync_txq_pidx(struct adapte else delta = q->size - hw_pidx + q->db_pidx; wmb(); - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(q->cntxt_id) | V_PIDX(delta)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(delta)); } out: q->db_disabled = 0; @@@ -2617,9 -2617,9 +2617,9 @@@ static void process_db_full(struct work
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); drain_db_fifo(adap, dbfifo_drain_delay); - t4_set_reg_field(adap, A_SGE_INT_ENABLE3, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, + DBFIFO_HP_INT | DBFIFO_LP_INT); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); }
@@@ -2639,8 -2639,8 +2639,8 @@@ static void process_db_drop(struct work
void t4_db_full(struct adapter *adap) { - t4_set_reg_field(adap, A_SGE_INT_ENABLE3, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); }
@@@ -3453,7 -3453,7 +3453,7 @@@ static void eeh_resume(struct pci_dev * rtnl_unlock(); }
-static struct pci_error_handlers cxgb4_eeh = { +static const struct pci_error_handlers cxgb4_eeh = { .error_detected = eeh_err_detected, .slot_reset = eeh_slot_reset, .resume = eeh_resume, @@@ -3694,7 -3694,15 +3694,7 @@@ static void __devinit print_port_info(c
 static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
 {
-	u16 v;
-	int pos;
-
-	pos = pci_pcie_cap(dev);
-	if (pos > 0) {
-		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
-		v |= PCI_EXP_DEVCTL_RELAX_EN;
-		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
-	}
+	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 }
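[ The pcie_capability_*() helpers used throughout this merge collapse a
  recurring open-coded sequence: find the PCIe capability, then
  read-modify-write a register inside it. Roughly, and leaving out the
  register-availability checks the real helper performs,
  pcie_capability_set_word() behaves like this hypothetical
  re-implementation (illustration only):

	#include <linux/pci.h>

	static int pcie_set_word_sketch(struct pci_dev *dev, int reg,
					u16 bits)
	{
		int pos = pci_pcie_cap(dev); /* PCIe capability offset */
		u16 v;

		if (!pos)
			return -EINVAL;	/* not a PCIe device */
		pci_read_config_word(dev, pos + reg, &v);
		v |= bits;
		return pci_write_config_word(dev, pos + reg, v);
	} ]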
/* diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index af16013,8e988d6..dccecdc --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@@ -1018,9 -1018,9 +1018,9 @@@ static void sge_intr_handler(struct ada { ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, - { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, - { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, - { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, + { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, + { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, + { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, @@@ -1520,7 -1520,7 +1520,7 @@@ void t4_intr_enable(struct adapter *ada ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT | + DBFIFO_HP_INT | DBFIFO_LP_INT | EGRESS_SIZE_ERR); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); @@@ -2033,8 -2033,8 +2033,8 @@@ int t4_mem_win_read_len(struct adapter if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) return -EINVAL;
- t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15); - t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET); + t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15); + t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4) *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); @@@ -2741,9 -2741,11 +2741,9 @@@ static void __devinit get_pci_mode(stru struct pci_params *p) { u16 val; - u32 pcie_cap = pci_pcie_cap(adapter->pdev);
- if (pcie_cap) { - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, - &val); + if (pci_is_pcie(adapter->pdev)) { + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); p->speed = val & PCI_EXP_LNKSTA_CLS; p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; } diff --combined drivers/net/ethernet/emulex/benet/be_main.c index 95d1047,84379f4..778618d --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -20,6 -20,7 +20,7 @@@ #include "be.h" #include "be_cmds.h" #include <asm/div64.h> + #include <linux/aer.h>
MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@@ -2176,8 -2177,7 +2177,7 @@@ static uint be_num_rss_want(struct be_a { u32 num = 0; if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !sriov_want(adapter) && be_physfn(adapter) && - !be_is_mc(adapter)) { + !sriov_want(adapter) && be_physfn(adapter)) { num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); } @@@ -2646,8 -2646,8 +2646,8 @@@ static int be_vf_setup(struct be_adapte }
for_all_vfs(adapter, vf_cfg, vf) { - status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, - NULL, vf + 1); + lnk_speed = 1000; + status = be_cmd_set_qos(adapter, lnk_speed, vf + 1); if (status) goto err; vf_cfg->tx_rate = lnk_speed * 10; @@@ -2724,6 -2724,8 +2724,8 @@@ static int be_get_config(struct be_adap if (pos) { pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, &dev_num_vfs); + if (!lancer_chip(adapter)) + dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); adapter->dev_num_vfs = dev_num_vfs; } return 0; @@@ -3437,6 -3439,7 +3439,7 @@@ static void be_ctrl_cleanup(struct be_a if (mem->va) dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); + kfree(adapter->pmac_id); }
static int be_ctrl_init(struct be_adapter *adapter) @@@ -3473,6 -3476,12 +3476,12 @@@ } memset(rx_filter->va, 0, rx_filter->size);
+ /* primary mac needs 1 pmac entry */ + adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); @@@ -3543,6 -3552,8 +3552,8 @@@ static void __devexit be_remove(struct
be_ctrl_cleanup(adapter);
+ pci_disable_pcie_error_reporting(pdev); + pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); @@@ -3609,12 -3620,6 +3620,6 @@@ static int be_get_initial_config(struc else adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
- /* primary mac needs 1 pmac entry */ - adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, - sizeof(u32), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - status = be_cmd_get_cntl_attributes(adapter); if (status) return status; @@@ -3844,6 -3849,10 +3849,10 @@@ static int __devinit be_probe(struct pc } }
+ status = pci_enable_pcie_error_reporting(pdev); + if (status) + dev_err(&pdev->dev, "Could not use PCIe error reporting\n"); + status = be_ctrl_init(adapter); if (status) goto free_netdev; @@@ -4066,6 -4075,7 +4075,7 @@@ static pci_ers_result_t be_eeh_reset(st if (status) return PCI_ERS_RESULT_DISCONNECT;
+ pci_cleanup_aer_uncorrect_error_status(pdev); return PCI_ERS_RESULT_RECOVERED; }
@@@ -4106,7 -4116,7 +4116,7 @@@ err dev_err(&adapter->pdev->dev, "EEH resume failed\n"); }
-static struct pci_error_handlers be_eeh_handlers = { +static const struct pci_error_handlers be_eeh_handlers = { .error_detected = be_eeh_err_detected, .slot_reset = be_eeh_reset, .resume = be_eeh_resume, diff --combined drivers/net/ethernet/freescale/gianfar_ptp.c index 0daa66b,18762a3..b9db0e0 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@@ -510,12 -510,12 +510,12 @@@ static int gianfar_ptp_probe(struct pla
spin_unlock_irqrestore(&etsects->lock, flags);
- etsects->clock = ptp_clock_register(&etsects->caps); + etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); if (IS_ERR(etsects->clock)) { err = PTR_ERR(etsects->clock); goto no_clock; } - gfar_phc_clock = ptp_clock_index(etsects->clock); + gfar_phc_index = ptp_clock_index(etsects->clock);
dev_set_drvdata(&dev->dev, etsects);
@@@ -539,7 -539,7 +539,7 @@@ static int gianfar_ptp_remove(struct pl gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0);
- gfar_phc_clock = -1; + gfar_phc_index = -1; ptp_clock_unregister(etsects->clock); iounmap(etsects->regs); release_resource(etsects->rsrc); diff --combined drivers/net/ethernet/intel/e1000/e1000_main.c index f3f9aeb,3a8368e..222bfaf --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@@ -192,7 -192,7 +192,7 @@@ static pci_ers_result_t e1000_io_error_ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); static void e1000_io_resume(struct pci_dev *pdev);
-static struct pci_error_handlers e1000_err_handler = { +static const struct pci_error_handlers e1000_err_handler = { .error_detected = e1000_io_error_detected, .slot_reset = e1000_io_slot_reset, .resume = e1000_io_resume, @@@ -2014,6 -2014,7 +2014,7 @@@ static void e1000_clean_tx_ring(struct e1000_unmap_and_free_tx_resource(adapter, buffer_info); }
+ netdev_reset_queue(adapter->netdev); size = sizeof(struct e1000_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size);
@@@ -3149,17 -3150,6 +3150,17 @@@ static netdev_tx_t e1000_xmit_frame(str return NETDEV_TX_OK; }
+	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
+	 * packets may get corrupted during padding by HW.
+	 * To WA this issue, pad all small packets manually.
+	 */
+	if (skb->len < ETH_ZLEN) {
+		if (skb_pad(skb, ETH_ZLEN - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = ETH_ZLEN;
+		skb_set_tail_pointer(skb, ETH_ZLEN);
+	}
+
 	mss = skb_shinfo(skb)->gso_size;
 	/* The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
if (count) { + netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb);
e1000_tx_queue(adapter, tx_ring, tx_flags, count); @@@ -3860,6 -3851,7 +3862,7 @@@ static bool e1000_clean_tx_irq(struct e unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; @@@ -3877,6 -3869,11 +3880,11 @@@ if (cleaned) { total_tx_packets += buffer_info->segs; total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + } e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; @@@ -3890,6 -3887,8 +3898,8 @@@
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl); + #define TX_WAKE_THRESHOLD 32 if (unlikely(count && netif_carrier_ok(netdev) && E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { @@@ -4950,6 -4949,10 +4960,10 @@@ int e1000_set_spd_dplx(struct e1000_ada default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + return 0;
err_inval: diff --combined drivers/net/ethernet/intel/e1000e/netdev.c index 3f0223a,121990c..fb659dd --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -56,7 -56,7 +56,7 @@@
#define DRV_EXTRAVERSION "-k"
- #define DRV_VERSION "2.0.0" DRV_EXTRAVERSION + #define DRV_VERSION "2.1.4" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION;
@@@ -3446,7 -3446,7 +3446,7 @@@ void e1000e_reset(struct e1000_adapter
/* * if short on Rx space, Rx wins and must trump Tx - * adjustment or use Early Receive if available + * adjustment */ if (pba < min_rx_space) pba = min_rx_space; @@@ -3755,6 -3755,10 +3755,10 @@@ static irqreturn_t e1000_intr_msi_test( e_dbg("icr is %08X\n", icr); if (icr & E1000_ICR_RXSEQ) { adapter->flags &= ~FLAG_MSI_TEST_FAILED; + /* + * Force memory writes to complete before acknowledging the + * interrupt is handled. + */ wmb(); }
@@@ -3796,6 -3800,10 +3800,10 @@@ static int e1000_test_msi_interrupt(str goto msi_test_failed; }
+ /* + * Force memory writes to complete before enabling and firing an + * interrupt. + */ wmb();
e1000_irq_enable(adapter); @@@ -3807,7 -3815,7 +3815,7 @@@
e1000_irq_disable(adapter);
- rmb(); + rmb(); /* read flags after interrupt has been fired */
if (adapter->flags & FLAG_MSI_TEST_FAILED) { adapter->int_mode = E1000E_INT_MODE_LEGACY; @@@ -4670,7 -4678,7 +4678,7 @@@ static int e1000_tso(struct e1000_ring struct e1000_buffer *buffer_info; unsigned int i; u32 cmd_length = 0; - u16 ipcse = 0, tucse, mss; + u16 ipcse = 0, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len;
if (!skb_is_gso(skb)) @@@ -4704,7 -4712,6 +4712,6 @@@ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; tucss = skb_transport_offset(skb); tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; - tucse = 0;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); @@@ -4718,7 -4725,7 +4725,7 @@@ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; - context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); @@@ -5584,15 -5591,16 +5591,15 @@@ static void e1000_complete_shutdown(str */ if (adapter->flags & FLAG_IS_QUAD_PORT) { struct pci_dev *us_dev = pdev->bus->self; - int pos = pci_pcie_cap(us_dev); u16 devctl;
- pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, - (devctl & ~PCI_EXP_DEVCTL_CERE)); + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE));
e1000_power_off(pdev, sleep, wake);
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); } else { e1000_power_off(pdev, sleep, wake); } @@@ -5606,15 -5614,25 +5613,15 @@@ static void __e1000e_disable_aspm(struc #else static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { - int pos; - u16 reg16; - /* * Both device and parent should have the same ASPM setting. * Disable ASPM in downstream component first and then upstream. */ - pos = pci_pcie_cap(pdev); - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~state; - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); - - if (!pdev->bus->self) - return; + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
- pos = pci_pcie_cap(pdev->bus->self); - pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~state; - pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); + if (pdev->bus->self) + pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, + state); } #endif static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) @@@ -6475,7 -6493,7 +6482,7 @@@ static void __devexit e1000_remove(stru }
/* PCI Error Recovery (ERS) */ -static struct pci_error_handlers e1000_err_handler = { +static const struct pci_error_handlers e1000_err_handler = { .error_detected = e1000_io_error_detected, .slot_reset = e1000_io_slot_reset, .resume = e1000_io_resume, diff --combined drivers/net/ethernet/intel/igb/igb_main.c index f88c822,60bf465..e1ceb37 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -172,8 -172,7 +172,7 @@@ static void igb_check_vf_rate_limit(str
#ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); - static int igb_find_enabled_vfs(struct igb_adapter *adapter); - static int igb_check_vf_assignment(struct igb_adapter *adapter); + static bool igb_vfs_are_assigned(struct igb_adapter *adapter); #endif
#ifdef CONFIG_PM @@@ -217,7 -216,7 +216,7 @@@ static pci_ers_result_t igb_io_error_de static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); static void igb_io_resume(struct pci_dev *);
-static struct pci_error_handlers igb_err_handler = { +static const struct pci_error_handlers igb_err_handler = { .error_detected = igb_io_error_detected, .slot_reset = igb_io_slot_reset, .resume = igb_io_resume, @@@ -404,8 -403,8 +403,8 @@@ static void igb_dump(struct igb_adapte buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, - buffer_info->length, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), buffer_info->next_to_watch, (u64)buffer_info->time_stamp); } @@@ -456,8 -455,8 +455,8 @@@ " %04X %p %016llX %p%s\n", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, - buffer_info->length, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), buffer_info->next_to_watch, (u64)buffer_info->time_stamp, buffer_info->skb, next_desc); @@@ -466,7 -465,8 +465,8 @@@ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, buffer_info->skb->data, - buffer_info->length, true); + dma_unmap_len(buffer_info, len), + true); } }
@@@ -683,52 -683,29 +683,29 @@@ static int igb_alloc_queues(struct igb_ { struct igb_ring *ring; int i; - int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) { - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL, - adapter->node); - if (!ring) - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); + ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->tx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; - ring->numa_node = adapter->node; /* For 82575, context index must be unique per ring. */ if (adapter->hw.mac.type == e1000_82575) set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); adapter->tx_ring[i] = ring; } - /* Restore the adapter's original node */ - adapter->node = orig_node;
for (i = 0; i < adapter->num_rx_queues; i++) { - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL, - adapter->node); - if (!ring) - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); + ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->rx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; - ring->numa_node = adapter->node; /* set flag indicating ring supports SCTP checksum offload */ if (adapter->hw.mac.type >= e1000_82576) set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); @@@ -742,16 -719,12 +719,12 @@@
adapter->rx_ring[i] = ring; } - /* Restore the adapter's original node */ - adapter->node = orig_node;
igb_cache_ring_register(adapter);
return 0;
err: - /* Restore the adapter's original node */ - adapter->node = orig_node; igb_free_queues(adapter);
return -ENOMEM; @@@ -1117,24 -1090,10 +1090,10 @@@ static int igb_alloc_q_vectors(struct i struct igb_q_vector *q_vector; struct e1000_hw *hw = &adapter->hw; int v_idx; - int orig_node = adapter->node;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - if ((adapter->num_q_vectors == (adapter->num_rx_queues + - adapter->num_tx_queues)) && - (adapter->num_rx_queues == v_idx)) - adapter->node = orig_node; - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL, - adapter->node); - if (!q_vector) - q_vector = kzalloc(sizeof(struct igb_q_vector), - GFP_KERNEL); + q_vector = kzalloc(sizeof(struct igb_q_vector), + GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; @@@ -1143,14 -1102,10 +1102,10 @@@ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); adapter->q_vector[v_idx] = q_vector; } - /* Restore the adapter's original node */ - adapter->node = orig_node;
return 0;
err_out: - /* Restore the adapter's original node */ - adapter->node = orig_node; igb_free_q_vectors(adapter); return -ENOMEM; } @@@ -1751,6 -1706,11 +1706,11 @@@ void igb_reset(struct igb_adapter *adap /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+ #ifdef CONFIG_IGB_PTP + /* Re-enable PTP, where applicable. */ + igb_ptp_reset(adapter); + #endif /* CONFIG_IGB_PTP */ + igb_get_phy_info(hw); }
@@@ -2180,11 -2140,12 +2140,12 @@@ static int __devinit igb_probe(struct p }
#endif + #ifdef CONFIG_IGB_PTP /* do hw tstamp init after resetting */ igb_ptp_init(adapter); + #endif /* CONFIG_IGB_PTP */
- #endif dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", @@@ -2259,9 -2220,9 +2220,9 @@@ static void __devexit igb_remove(struc
pm_runtime_get_noresume(&pdev->dev); #ifdef CONFIG_IGB_PTP - igb_ptp_remove(adapter); + igb_ptp_stop(adapter); + #endif /* CONFIG_IGB_PTP */
- #endif /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@@ -2294,11 -2255,11 +2255,11 @@@ /* reclaim resources allocated to VFs */ if (adapter->vf_data) { /* disable iov and allow time for transactions to clear */ - if (!igb_check_vf_assignment(adapter)) { + if (igb_vfs_are_assigned(adapter)) { + dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + } else { pci_disable_sriov(pdev); msleep(500); - } else { - dev_info(&pdev->dev, "VF(s) assigned to guests!\n"); }
kfree(adapter->vf_data); @@@ -2338,7 -2299,7 +2299,7 @@@ static void __devinit igb_probe_vfs(str #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - int old_vfs = igb_find_enabled_vfs(adapter); + int old_vfs = pci_num_vf(adapter->pdev); int i;
/* Virtualization features not supported on i210 family. */ @@@ -2418,8 -2379,6 +2379,6 @@@ static int __devinit igb_sw_init(struc VLAN_HLEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->node = -1; - spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { @@@ -2666,13 -2625,11 +2625,11 @@@ static int igb_close(struct net_device int igb_setup_tx_resources(struct igb_ring *tx_ring) { struct device *dev = tx_ring->dev; int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); - if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); + + tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err;
@@@ -2680,18 -2637,10 +2637,10 @@@ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096);
- set_dev_node(dev, tx_ring->numa_node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); - set_dev_node(dev, orig_node); - if (!tx_ring->desc) - tx_ring->desc = dma_alloc_coherent(dev, - tx_ring->size, - &tx_ring->dma, - GFP_KERNEL); - if (!tx_ring->desc) goto err;
@@@ -2702,8 -2651,8 +2651,8 @@@
err: vfree(tx_ring->tx_buffer_info); - dev_err(dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; }
@@@ -2820,34 -2769,23 +2769,23 @@@ static void igb_configure_tx(struct igb int igb_setup_rx_resources(struct igb_ring *rx_ring) { struct device *dev = rx_ring->dev; - int orig_node = dev_to_node(dev); - int size, desc_len; + int size;
size = sizeof(struct igb_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); - if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); + + rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) goto err;
- desc_len = sizeof(union e1000_adv_rx_desc);
/* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096);
- set_dev_node(dev, rx_ring->numa_node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); - set_dev_node(dev, orig_node); - if (!rx_ring->desc) - rx_ring->desc = dma_alloc_coherent(dev, - rx_ring->size, - &rx_ring->dma, - GFP_KERNEL); - if (!rx_ring->desc) goto err;
@@@ -2859,8 -2797,7 +2797,7 @@@ err: vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the receive descriptor" - " ring\n"); + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); return -ENOMEM; }
@@@ -2898,57 -2835,48 +2835,48 @@@ static void igb_setup_mrqc(struct igb_a { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; - u32 j, num_rx_queues, shift = 0, shift2 = 0; - union e1000_reta { - u32 dword; - u8 bytes[4]; - } reta; - static const u8 rsshash[40] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, - 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, - 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, - 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; + u32 j, num_rx_queues, shift = 0; + static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, + 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, + 0xA32DCB77, 0x0CF23080, 0x3BB7426A, + 0xFA01ACBE };
 	/* Fill out hash function seeds */
-	for (j = 0; j < 10; j++) {
-		u32 rsskey = rsshash[(j * 4)];
-		rsskey |= rsshash[(j * 4) + 1] << 8;
-		rsskey |= rsshash[(j * 4) + 2] << 16;
-		rsskey |= rsshash[(j * 4) + 3] << 24;
-		array_wr32(E1000_RSSRK(0), j, rsskey);
-	}
+	for (j = 0; j < 10; j++)
+		wr32(E1000_RSSRK(j), rsskey[j]);
num_rx_queues = adapter->rss_queues;
-	if (adapter->vfs_allocated_count) {
-		/* 82575 and 82576 supports 2 RSS queues for VMDq */
-		switch (hw->mac.type) {
-		case e1000_i350:
-		case e1000_82580:
-			num_rx_queues = 1;
-			shift = 0;
-			break;
-		case e1000_82576:
+	switch (hw->mac.type) {
+	case e1000_82575:
+		shift = 6;
+		break;
+	case e1000_82576:
+		/* 82576 supports 2 RSS queues for SR-IOV */
+		if (adapter->vfs_allocated_count) {
 			shift = 3;
 			num_rx_queues = 2;
-			break;
-		case e1000_82575:
-			shift = 2;
-			shift2 = 6;
-		default:
-			break;
 		}
-	} else {
-		if (hw->mac.type == e1000_82575)
-			shift = 6;
+		break;
+	default:
+		break;
 	}
-	for (j = 0; j < (32 * 4); j++) {
-		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
-		if (shift2)
-			reta.bytes[j & 3] |= num_rx_queues << shift2;
-		if ((j & 3) == 3)
-			wr32(E1000_RETA(j >> 2), reta.dword);
+	/*
+	 * Populate the indirection table 4 entries at a time.  To do this
+	 * we are generating the results for n and n+2 and then interleaving
+	 * those with the results with n+1 and n+3.
+	 */
+	for (j = 0; j < 32; j++) {
+		/* first pass generates n and n+2 */
+		u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+		u32 reta = (base & 0x07800780) >> (7 - shift);
+
+		/* second pass generates n+1 and n+3 */
+		base += 0x00010001 * num_rx_queues;
+		reta |= (base & 0x07800780) << (1 + shift);
+
+		wr32(E1000_RETA(j), reta);
 	}
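[ Unpacking the constants shows what the new loop computes: each 32-bit
  write carries four one-byte RETA entries, and entry n resolves to
  ((n * num_rx_queues) / 128) << shift, i.e. the 128-entry table is split
  into num_rx_queues equal blocks (the removed code interleaved entries
  with j % num_rx_queues instead; both spread traffic evenly). A
  stand-alone userspace check of that expansion (hypothetical,
  illustration only):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t num_rx_queues = 4, shift = 0;
		uint32_t counts[8] = { 0 };

		for (uint32_t j = 0; j < 32; j++) {
			/* first pass generates entries 4j and 4j+2 */
			uint32_t base = ((j * 0x00040004) + 0x00020000) *
					num_rx_queues;
			uint32_t reta = (base & 0x07800780) >> (7 - shift);

			/* second pass generates entries 4j+1 and 4j+3 */
			base += 0x00010001 * num_rx_queues;
			reta |= (base & 0x07800780) << (1 + shift);

			for (uint32_t k = 0; k < 4; k++) {
				uint32_t n = 4 * j + k;
				uint32_t q = (reta >> (8 * k)) & 0xff;

				/* matches the closed form derived above */
				assert(q == ((n * num_rx_queues) / 128)
					    << shift);
				counts[q >> shift]++;
			}
		}
		for (uint32_t q = 0; q < num_rx_queues; q++)
			printf("queue %u: %u entries\n",
			       (unsigned)q, (unsigned)counts[q]);
		return 0;
	} ]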
/* @@@ -3184,8 -3112,10 +3112,10 @@@ void igb_configure_rx_ring(struct igb_a srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT; #endif srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + #ifdef CONFIG_IGB_PTP if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; + #endif /* CONFIG_IGB_PTP */ /* Only set Drop Enable if we are supporting multiple queues */ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) srrctl |= E1000_SRRCTL_DROP_EN; @@@ -3269,20 -3199,20 +3199,20 @@@ void igb_unmap_and_free_tx_resource(str { if (tx_buffer->skb) { dev_kfree_skb_any(tx_buffer->skb); - if (tx_buffer->dma) + if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); - } else if (tx_buffer->dma) { + } else if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; - tx_buffer->dma = 0; + dma_unmap_len_set(tx_buffer, len, 0); /* buffer_info must be completely set up in the transmit path */ }
@@@ -4229,9 -4159,11 +4159,11 @@@ static __le32 igb_tx_cmd_type(u32 tx_fl if (tx_flags & IGB_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+ #ifdef CONFIG_IGB_PTP /* set timestamp bit if present */ - if (tx_flags & IGB_TX_FLAGS_TSTAMP) + if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); + #endif /* CONFIG_IGB_PTP */
/* set segmentation bits for TSO */ if (tx_flags & IGB_TX_FLAGS_TSO) @@@ -4275,7 -4207,7 +4207,7 @@@ static void igb_tx_map(struct igb_ring const u8 hdr_len) { struct sk_buff *skb = first->skb; - struct igb_tx_buffer *tx_buffer_info; + struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; dma_addr_t dma; struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; @@@ -4296,8 -4228,8 +4228,8 @@@ goto dma_error;
/* record length, and DMA address */ - first->length = size; - first->dma = dma; + dma_unmap_len_set(first, len, size); + dma_unmap_addr_set(first, dma, dma); tx_desc->read.buffer_addr = cpu_to_le64(dma);
for (;;) { @@@ -4339,9 -4271,9 +4271,9 @@@ if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error;
- tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_buffer_info->length = size; - tx_buffer_info->dma = dma; + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma);
tx_desc->read.olinfo_status = 0; tx_desc->read.buffer_addr = cpu_to_le64(dma); @@@ -4392,9 -4324,9 +4324,9 @@@ dma_error
/* clear dma mappings for failed tx_buffer_info map */ for (;;) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - if (tx_buffer_info == first) + tx_buffer = &tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) break; if (i == 0) i = tx_ring->count; @@@ -4440,6 -4372,9 +4372,9 @@@ static inline int igb_maybe_stop_tx(str netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring *tx_ring) { + #ifdef CONFIG_IGB_PTP + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + #endif /* CONFIG_IGB_PTP */ struct igb_tx_buffer *first; int tso; u32 tx_flags = 0; @@@ -4462,10 -4397,17 +4397,17 @@@ first->bytecount = skb->len; first->gso_segs = 1;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + #ifdef CONFIG_IGB_PTP + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + !(adapter->ptp_tx_skb))) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); } + #endif /* CONFIG_IGB_PTP */
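The transmit path now only flags the skb and defers the actual timestamp read to ptp_tx_work. A hypothetical sketch of that worker, pieced together from the igb_tx_hwtstamp() body removed later in this diff (the real implementation lives in igb_ptp.c and additionally has to handle timeouts):

/* sketch only -- assumes the igb driver's headers and register macros */
static void igb_ptp_tx_work_sketch(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work, struct igb_adapter,
						   ptp_tx_work);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* hardware has not latched a Tx timestamp yet */
	if (!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	/* reading TXSTMPH re-arms the latch for the next packet */
	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);

	/* drop the reference taken by skb_get() in igb_xmit_frame_ring() */
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
}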
if (vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; @@@ -4661,11 -4603,13 +4603,13 @@@ void igb_update_stats(struct igb_adapte bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i];
- ring->rx_stats.drops += rqdpc_tmp; - net_stats->rx_fifo_errors += rqdpc_tmp; + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + }
do { start = u64_stats_fetch_begin_bh(&ring->rx_syncp); @@@ -4755,7 -4699,11 +4699,11 @@@ reg = rd32(E1000_CTRL_EXT); if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { adapter->stats.rxerrc += rd32(E1000_RXERRC); - adapter->stats.tncrs += rd32(E1000_TNCRS); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); }
adapter->stats.tsctc += rd32(E1000_TSCTC); @@@ -4852,6 -4800,19 +4800,19 @@@ static irqreturn_t igb_msix_other(int i mod_timer(&adapter->watchdog_timer, jiffies + 1); }
+ #ifdef CONFIG_IGB_PTP + if (icr & E1000_ICR_TS) { + u32 tsicr = rd32(E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + wr32(E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } + #endif /* CONFIG_IGB_PTP */ + wr32(E1000_EIMS, adapter->eims_other);
return IRQ_HANDLED; @@@ -5002,102 -4963,43 +4963,43 @@@ static int igb_notify_dca(struct notifi static int igb_vf_configure(struct igb_adapter *adapter, int vf) { unsigned char mac_addr[ETH_ALEN]; - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pvfdev; - unsigned int device_id; - u16 thisvf_devfn;
eth_random_addr(mac_addr); igb_set_vf_mac(adapter, vf, mac_addr);
- switch (adapter->hw.mac.type) { - case e1000_82576: - device_id = IGB_82576_VF_DEV_ID; - /* VF Stride for 82576 is 2 */ - thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) | - (pdev->devfn & 1); - break; - case e1000_i350: - device_id = IGB_I350_VF_DEV_ID; - /* VF Stride for I350 is 4 */ - thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) | - (pdev->devfn & 3); - break; - default: - device_id = 0; - thisvf_devfn = 0; - break; - } - - pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == thisvf_devfn) - break; - pvfdev = pci_get_device(hw->vendor_id, - device_id, pvfdev); - } - - if (pvfdev) - adapter->vf_data[vf].vfdev = pvfdev; - else - dev_err(&pdev->dev, - "Couldn't find pci dev ptr for VF %4.4x\n", - thisvf_devfn); - return pvfdev != NULL; + return 0; }
- static int igb_find_enabled_vfs(struct igb_adapter *adapter) + static bool igb_vfs_are_assigned(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct pci_dev *pvfdev; - u16 vf_devfn = 0; - u16 vf_stride; - unsigned int device_id; - int vfs_found = 0; + struct pci_dev *vfdev; + int dev_id;
switch (adapter->hw.mac.type) { case e1000_82576: - device_id = IGB_82576_VF_DEV_ID; - /* VF Stride for 82576 is 2 */ - vf_stride = 2; + dev_id = IGB_82576_VF_DEV_ID; break; case e1000_i350: - device_id = IGB_I350_VF_DEV_ID; - /* VF Stride for I350 is 4 */ - vf_stride = 4; + dev_id = IGB_I350_VF_DEV_ID; break; default: - device_id = 0; - vf_stride = 0; - break; - } - - vf_devfn = pdev->devfn + 0x80; - pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == vf_devfn && - (pvfdev->bus->number >= pdev->bus->number)) - vfs_found++; - vf_devfn += vf_stride; - pvfdev = pci_get_device(hw->vendor_id, - device_id, pvfdev); + return false; }
- return vfs_found; - } - - static int igb_check_vf_assignment(struct igb_adapter *adapter) - { - int i; - for (i = 0; i < adapter->vfs_allocated_count; i++) { - if (adapter->vf_data[i].vfdev) { - if (adapter->vf_data[i].vfdev->dev_flags & - PCI_DEV_FLAGS_ASSIGNED) + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) return true; } + + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); } + return false; }
@@@ -5643,6 -5545,19 +5545,19 @@@ static irqreturn_t igb_intr_msi(int irq mod_timer(&adapter->watchdog_timer, jiffies + 1); }
+ #ifdef CONFIG_IGB_PTP + if (icr & E1000_ICR_TS) { + u32 tsicr = rd32(E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + wr32(E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } + #endif /* CONFIG_IGB_PTP */ + napi_schedule(&q_vector->napi);
return IRQ_HANDLED; @@@ -5684,6 -5599,19 +5599,19 @@@ static irqreturn_t igb_intr(int irq, vo mod_timer(&adapter->watchdog_timer, jiffies + 1); }
+ #ifdef CONFIG_IGB_PTP + if (icr & E1000_ICR_TS) { + u32 tsicr = rd32(E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + wr32(E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } + #endif /* CONFIG_IGB_PTP */ + napi_schedule(&q_vector->napi);
return IRQ_HANDLED; @@@ -5743,37 -5671,6 +5671,6 @@@ static int igb_poll(struct napi_struct return 0; }
- #ifdef CONFIG_IGB_PTP - /** - * igb_tx_hwtstamp - utility function which checks for TX time stamp - * @q_vector: pointer to q_vector containing needed info - * @buffer: pointer to igb_tx_buffer structure - * - * If we were asked to do hardware stamping and such a time stamp is - * available, then it must have been for this skb here because we only - * allow only one such packet into the queue. - */ - static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, - struct igb_tx_buffer *buffer_info) - { - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps shhwtstamps; - u64 regval; - - /* if skb does not support hw timestamp or TX stamp not valid exit */ - if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) || - !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) - return; - - regval = rd32(E1000_TXSTMPL); - regval |= (u64)rd32(E1000_TXSTMPH) << 32; - - igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); - skb_tstamp_tx(buffer_info->skb, &shhwtstamps); - } - - #endif /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info @@@ -5785,7 -5682,7 +5682,7 @@@ static bool igb_clean_tx_irq(struct igb struct igb_adapter *adapter = q_vector->adapter; struct igb_ring *tx_ring = q_vector->tx.ring; struct igb_tx_buffer *tx_buffer; - union e1000_adv_tx_desc *tx_desc, *eop_desc; + union e1000_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; @@@ -5797,16 -5694,16 +5694,16 @@@ tx_desc = IGB_TX_DESC(tx_ring, i); i -= tx_ring->count;
- for (; budget; budget--) { - eop_desc = tx_buffer->next_to_watch; - - /* prevent any other reads prior to eop_desc */ - rmb(); + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break;
+ /* prevent any other reads prior to eop_desc */ + rmb(); + /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) break; @@@ -5818,25 -5715,21 +5715,21 @@@ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs;
- #ifdef CONFIG_IGB_PTP - /* retrieve hardware timestamp */ - igb_tx_hwtstamp(q_vector, tx_buffer); - - #endif /* free the skb */ dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */ dma_unmap_single(tx_ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+ /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* clear last DMA location and unmap remaining buffers */ while (tx_desc != eop_desc) { - tx_buffer->dma = 0; - tx_buffer++; tx_desc++; i++; @@@ -5847,17 -5740,15 +5740,15 @@@ }
/* unmap any remaining paged data */ - if (tx_buffer->dma) { + if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(tx_ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); } }
- /* clear last DMA location */ - tx_buffer->dma = 0; - /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@@ -5867,7 -5758,13 +5758,13 @@@ tx_buffer = tx_ring->tx_buffer_info; tx_desc = IGB_TX_DESC(tx_ring, 0); } - } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget));
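The rewritten loop keeps the ring index biased by -tx_ring->count (note the i -= tx_ring->count before the loop), so the wrap check is a cheap test against zero rather than a compare against the ring size on every descriptor. A standalone illustration of the idiom (plain C, not driver code):

#include <assert.h>

int main(void)
{
	unsigned int count = 8, next_to_clean = 5;	/* example ring state */
	unsigned int i = next_to_clean - count;		/* biased index */
	unsigned int step;

	for (step = 0; step < 2 * count; step++) {
		/* recover the real position; unsigned wrap makes this exact */
		unsigned int pos = i + count;

		assert(pos < count);

		i++;
		if (!i)			/* walked off the end of the ring */
			i -= count;	/* back to the biased start */
	}
	return 0;
}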
netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); @@@ -5883,12 -5780,10 +5780,10 @@@ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw;
/* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); - if (eop_desc && + if (tx_buffer->next_to_watch && time_after(jiffies, tx_buffer->time_stamp + (adapter->tx_timeout_factor * HZ)) && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { @@@ -5912,9 -5807,9 +5807,9 @@@ tx_ring->next_to_use, tx_ring->next_to_clean, tx_buffer->time_stamp, - eop_desc, + tx_buffer->next_to_watch, jiffies, - eop_desc->wb.status); + tx_buffer->next_to_watch->wb.status); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@@ -5994,47 -5889,6 +5889,6 @@@ static inline void igb_rx_hash(struct i skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); }
- #ifdef CONFIG_IGB_PTP - static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) - { - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - u64 regval; - - if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP | - E1000_RXDADV_STAT_TS)) - return; - - /* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a shared tx_flags that we - * can turn into a skb_shared_hwtstamps. - */ - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - u32 *stamp = (u32 *)skb->data; - regval = le32_to_cpu(*(stamp + 2)); - regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; - skb_pull(skb, IGB_TS_HDR_LEN); - } else { - if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; - - regval = rd32(E1000_RXSTMPL); - regval |= (u64)rd32(E1000_RXSTMPH) << 32; - } - - igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); - } - - #endif static void igb_rx_vlan(struct igb_ring *ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@@ -6146,8 -6000,8 +6000,8 @@@ static bool igb_clean_rx_irq(struct igb }
#ifdef CONFIG_IGB_PTP - igb_rx_hwtstamp(q_vector, rx_desc, skb); - #endif + igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb); + #endif /* CONFIG_IGB_PTP */ igb_rx_hash(rx_ring, rx_desc, skb); igb_rx_checksum(rx_ring, rx_desc, skb); igb_rx_vlan(rx_ring, rx_desc, skb); @@@ -6341,181 -6195,6 +6195,6 @@@ static int igb_mii_ioctl(struct net_dev }
/** - * igb_hwtstamp_ioctl - control hardware time stamping - * @netdev: - * @ifreq: - * @cmd: - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't case any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * - **/ - static int igb_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) - { - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config config; - u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - u32 tsync_rx_cfg = 0; - bool is_l4 = false; - bool is_l2 = false; - u32 regval; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - tsync_tx_ctl = 0; - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl = 0; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_ALL: - /* - * register TSYNCRXCFG must be set, therefore it is not - * possible to time stamp both Sync and Delay_Req messages - * => fall back to time stamping all packets - */ - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l2 = true; - is_l4 = true; - break; - default: - return -ERANGE; - } - - if (hw->mac.type == e1000_82575) { - if (tsync_rx_ctl | tsync_tx_ctl) - return -EINVAL; - return 0; - } - - /* - * Per-packet timestamping only works if all packets are - * timestamped, so enable timestamping in all packets as - * long as one rx filter was configured. 
- */ - if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { - tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - } - - /* enable/disable TX */ - regval = rd32(E1000_TSYNCTXCTL); - regval &= ~E1000_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; - wr32(E1000_TSYNCTXCTL, regval); - - /* enable/disable RX */ - regval = rd32(E1000_TSYNCRXCTL); - regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; - wr32(E1000_TSYNCRXCTL, regval); - - /* define which PTP packets are time stamped */ - wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); - - /* define ethertype filter for timestamped packets */ - if (is_l2) - wr32(E1000_ETQF(3), - (E1000_ETQF_FILTER_ENABLE | /* enable filter */ - E1000_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else - wr32(E1000_ETQF(3), 0); - - #define PTP_PORT 319 - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IPPROTO_UDP /* UDP */ - | E1000_FTQF_VF_BP /* VF not compared */ - | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ - | E1000_FTQF_MASK); /* mask all inputs */ - ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - - wr32(E1000_IMIR(3), htons(PTP_PORT)); - wr32(E1000_IMIREXT(3), - (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); - if (hw->mac.type == e1000_82576) { - /* enable source port check */ - wr32(E1000_SPQF(3), htons(PTP_PORT)); - ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; - } - wr32(E1000_FTQF(3), ftqf); - } else { - wr32(E1000_FTQF(3), E1000_FTQF_MASK); - } - wrfl(); - - adapter->hwtstamp_config = config; - - /* clear TX/RX time stamp registers, just to be sure */ - regval = rd32(E1000_TXSTMPH); - regval = rd32(E1000_RXSTMPH); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; - } - - /** * igb_ioctl - * @netdev: * @ifreq: @@@ -6528,8 -6207,10 +6207,10 @@@ static int igb_ioctl(struct net_device case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); + #ifdef CONFIG_IGB_PTP case SIOCSHWTSTAMP: - return igb_hwtstamp_ioctl(netdev, ifr, cmd); + return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); + #endif /* CONFIG_IGB_PTP */ default: return -EOPNOTSUPP; } @@@ -6538,20 -6219,28 +6219,20 @@@ s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; - u16 cap_offset;
- cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) + if (pcie_capability_read_word(adapter->pdev, reg, value)) return -E1000_ERR_CONFIG;
- pci_read_config_word(adapter->pdev, cap_offset + reg, value); - return 0; }
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; - u16 cap_offset;
- cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) + if (pcie_capability_write_word(adapter->pdev, reg, *value)) return -E1000_ERR_CONFIG;
- pci_write_config_word(adapter->pdev, cap_offset + reg, *value); - return 0; }
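Note on the conversion above: pcie_capability_read_word() and pcie_capability_write_word() locate the device's PCIe capability internally and return a non-zero error when the capability is absent or the access fails, so the explicit pdev->pcie_cap check is no longer needed; reg is still the register offset within the PCIe capability, exactly as before.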
@@@ -6667,6 -6356,10 +6348,10 @@@ int igb_set_spd_dplx(struct igb_adapte default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + return 0;
err_inval: diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ee61819,29465be..3d6150a --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -1167,7 -1167,7 +1167,7 @@@ static bool ixgbe_alloc_mapped_page(str }
bi->dma = dma; - bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); + bi->page_offset = 0;
return true; } @@@ -1320,29 -1320,6 +1320,6 @@@ static unsigned int ixgbe_get_headlen(u return max_len; }
- static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) - { - __le32 rsc_enabled; - u32 rsc_cnt; - - if (!ring_is_rsc_enabled(rx_ring)) - return; - - rsc_enabled = rx_desc->wb.lower.lo_dword.data & - cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); - - /* If this is an RSC frame rsc_cnt should be non-zero */ - if (!rsc_enabled) - return; - - rsc_cnt = le32_to_cpu(rsc_enabled); - rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; - - IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; - } - static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { @@@ -1440,16 -1417,28 +1417,28 @@@ static bool ixgbe_is_non_eop(struct ixg
prefetch(IXGBE_RX_DESC(rx_ring, ntc));
- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - return false; + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
- /* append_cnt indicates packet is RSC, if so fetch nextp */ - if (IXGBE_CB(skb)->append_cnt) { - ntc = le32_to_cpu(rx_desc->wb.upper.status_error); - ntc &= IXGBE_RXDADV_NEXTP_MASK; - ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } }
+ /* if we are the last buffer then there is nothing else to do */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ rx_ring->rx_buffer_info[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; @@@ -1458,6 -1447,78 +1447,78 @@@ }
/** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ + static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) + { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + } + + /** + * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. + */ + static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) + { + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; + } + + /** * ixgbe_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@@ -1479,24 -1540,7 +1540,7 @@@ static bool ixgbe_cleanup_headers(struc union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; struct net_device *netdev = rx_ring->netdev; - unsigned char *va; - unsigned int pull_len; - - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } else { - dma_sync_single_range_for_cpu(rx_ring->dev, - IXGBE_CB(skb)->dma, - frag->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - IXGBE_CB(skb)->dma = 0;
/* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, @@@ -1506,40 -1550,9 +1550,9 @@@ return true; }
- /* - * it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* - * we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = skb_frag_size(frag); - if (pull_len > IXGBE_RX_HDR_SIZE) - pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - /* - * if we sucked the frag empty then we should free it, - * if there are other frags here something is screwed up in hardware - */ - if (skb_frag_size(frag) == 0) { - BUG_ON(skb_shinfo(skb)->nr_frags != 1); - skb_shinfo(skb)->nr_frags = 0; - __skb_frag_unref(frag); - skb->truesize -= ixgbe_rx_bufsz(rx_ring); - } + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE /* do not attempt to pad FCoE Frames as this will disrupt DDP */ @@@ -1560,33 -1573,17 +1573,17 @@@ }
/** - * ixgbe_can_reuse_page - determine if we can reuse a page - * @rx_buffer: pointer to rx_buffer containing the page we want to reuse - * - * Returns true if page can be reused in another Rx buffer - **/ - static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer) - { - struct page *page = rx_buffer->page; - - /* if we are only owner of page and it is local we can reuse it */ - return likely(page_count(page) == 1) && - likely(page_to_nid(page) == numa_node_id()); - } - - /** * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * - * Syncronizes page for reuse by the adapter + * Synchronizes page for reuse by the adapter **/ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *old_buff) { struct ixgbe_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; - u16 bufsz = ixgbe_rx_bufsz(rx_ring);
new_buff = &rx_ring->rx_buffer_info[nta];
@@@ -1597,17 -1594,13 +1594,13 @@@ /* transfer page from old buffer to new buffer */ new_buff->page = old_buff->page; new_buff->dma = old_buff->dma; - - /* flip page offset to other buffer and store to new_buff */ - new_buff->page_offset = old_buff->page_offset ^ bufsz; + new_buff->page_offset = old_buff->page_offset;
/* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, bufsz, + new_buff->page_offset, + ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - - /* bump ref count on page before it is given to the stack */ - get_page(new_buff->page); }
/** @@@ -1617,20 -1610,159 +1610,159 @@@ * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * - * This function is based on skb_add_rx_frag. I would have used that - * function however it doesn't handle the truesize case correctly since we - * are allocating more memory than might be used for a single receive. + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. **/ - static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, + static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb, int size) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer->page, rx_buffer->page_offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += ixgbe_rx_bufsz(rx_ring); + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + #if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_bufsz(rx_ring); + #else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); + #endif + + if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + + #if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; + + /* + * since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unnecessary locked operation + */ + atomic_set(&page->_count, 2); + #else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); + #endif + + return true; + } + + static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) + { + struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); + #if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); + #endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. Only unmap it when EOP is + * reached + */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + goto dma_sync; + + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + ixgbe_dma_sync_frag(rx_ring, skb); + + dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + + /* pull page into skb */ + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; }
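The PAGE_SIZE < 8192 branch of ixgbe_add_rx_frag() above relies on splitting each 4K page into two 2K buffers and flipping between them with an XOR; one half can sit in an skb while the other is posted back to hardware, and atomic_set(&page->_count, 2) keeps one reference for each half without a locked add. A standalone illustration of the offset flip (plain C, not driver code):

#include <assert.h>

int main(void)
{
	unsigned int truesize = 2048;	/* ixgbe_rx_bufsz() with 4K pages */
	unsigned int page_offset = 0;	/* first half owned by hardware */

	page_offset ^= truesize;
	assert(page_offset == 2048);	/* now the second half is posted */

	page_offset ^= truesize;
	assert(page_offset == 0);	/* and back again */
	return 0;
}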
/** @@@ -1653,16 -1785,14 +1785,14 @@@ static bool ixgbe_clean_rx_irq(struct i unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE struct ixgbe_adapter *adapter = q_vector->adapter; - int ddp_bytes = 0; + int ddp_bytes; + unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring);
do { - struct ixgbe_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; - struct page *page; - u16 ntc;
/* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@@ -1670,9 -1800,7 +1800,7 @@@ cleaned_count = 0; }
- ntc = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC(rx_ring, ntc); - rx_buffer = &rx_ring->rx_buffer_info[ntc]; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) break; @@@ -1684,75 -1812,12 +1812,12 @@@ */ rmb();
- page = rx_buffer->page; - prefetchw(page); - - skb = rx_buffer->skb; - - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr); - #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); - #endif + /* retrieve a buffer from the ring */ + skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
- /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - break; - } - - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. Only unmap it when EOP is - * reached - */ - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - - /* pull page into skb */ - ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, - le16_to_cpu(rx_desc->wb.upper.length)); - - if (ixgbe_can_reuse_page(rx_buffer)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - } - - /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; - rx_buffer->page = NULL; - - ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); + /* exit if we failed to retrieve a buffer */ + if (!skb) + break;
cleaned_count++;
@@@ -1775,6 -1840,20 +1840,20 @@@ /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } if (!ddp_bytes) { dev_kfree_skb_any(skb); continue; @@@ -1788,21 -1867,6 +1867,6 @@@ budget--; } while (likely(budget));
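Folding the DDP accounting into the loop also makes the mss math per-packet. As a worked example, assuming the usual encapsulation sizes (14-byte fcoe_hdr, 24-byte fc_frame_header, 8-byte fcoe_crc_eof) and the common 2500-byte FCoE mini-jumbo MTU: mss = 2500 - 14 - 24 - 8 = 2454, which 'mss &= ~511' rounds down to 2048, so 10000 DDPed bytes are accounted as DIV_ROUND_UP(10000, 2048) = 5 received packets.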
- #ifdef IXGBE_FCOE - /* include DDPed FCoE data */ - if (ddp_bytes > 0) { - unsigned int mss; - - mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - - sizeof(struct fc_frame_header) - - sizeof(struct fcoe_crc_eof); - if (mss > 512) - mss &= ~511; - total_rx_bytes += ddp_bytes; - total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); - } - - #endif /* IXGBE_FCOE */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@@ -2868,11 -2932,7 +2932,7 @@@ static void ixgbe_configure_srrctl(stru srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */ - #if PAGE_SIZE > IXGBE_MAX_RXBUFFER - srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - #else srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - #endif
/* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@@ -2980,13 -3040,7 +3040,7 @@@ static void ixgbe_configure_rscctl(stru * total size of max desc * buf_len is not greater * than 65536 */ - #if (PAGE_SIZE <= 8192) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; - #elif (PAGE_SIZE <= 16384) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; - #else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; - #endif IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); }
@@@ -3606,8 -3660,6 +3660,6 @@@ static void ixgbe_configure_dcb(struct if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768);
- hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); - #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); @@@ -3807,6 -3859,11 +3859,11 @@@ static void ixgbe_configure(struct ixgb #ifdef CONFIG_IXGBE_DCB ixgbe_configure_dcb(adapter); #endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter);
ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); @@@ -3838,8 -3895,6 +3895,6 @@@ break; }
- ixgbe_configure_virtualization(adapter); - #ifdef IXGBE_FCOE /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); @@@ -4130,27 -4185,6 +4185,6 @@@ void ixgbe_reset(struct ixgbe_adapter * }
/** - * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers - * @rx_ring: ring to setup - * - * On many IA platforms the L1 cache has a critical stride of 4K, this - * results in each receive buffer starting in the same cache set. To help - * reduce the pressure on this cache set we can interleave the offsets so - * that only every other buffer will be in the same cache set. - **/ - static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring) - { - struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info; - u16 i; - - for (i = 0; i < rx_ring->count; i += 2) { - rx_buffer[0].page_offset = 0; - rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring); - rx_buffer = &rx_buffer[2]; - } - } - - /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ @@@ -4195,8 -4229,6 +4229,6 @@@ static void ixgbe_clean_rx_ring(struct size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size);
- ixgbe_init_rx_page_offset(rx_ring); - /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size);
@@@ -4646,8 -4678,6 +4678,6 @@@ int ixgbe_setup_rx_resources(struct ixg rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0;
- ixgbe_init_rx_page_offset(rx_ring); - return 0; err: vfree(rx_ring->rx_buffer_info); @@@ -5530,8 -5560,9 +5560,9 @@@ static void ixgbe_spoof_check(struct ix { u32 ssvpc;
- /* Do not perform spoof check for 82598 */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) + /* Do not perform spoof check for 82598 or if not in IOV mode */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) return;
ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); @@@ -5543,7 -5574,7 +5574,7 @@@ if (!ssvpc) return;
- e_warn(drv, "%d Spoofed packets detected\n", ssvpc); + e_warn(drv, "%u Spoofed packets detected\n", ssvpc); }
/** @@@ -5874,9 -5905,12 +5905,12 @@@ static void ixgbe_tx_csum(struct ixgbe_ u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) - return; + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { + if (unlikely(skb->no_fcs)) + first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; + if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) + return; + } } else { u8 l4_hdr = 0; switch (first->protocol) { @@@ -5938,7 -5972,6 +5972,6 @@@ static __le32 ixgbe_tx_cmd_type(u32 tx_ { /* set type for advanced descriptor with frame checksum insertion */ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
/* set HW vlan bit if vlan is present */ @@@ -5958,6 -5991,10 +5991,10 @@@ #endif cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */ + if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); + return cmd_type; }
@@@ -6063,8 -6100,6 +6100,6 @@@ static void ixgbe_tx_map(struct ixgbe_r if (likely(!data_len)) break;
- if (unlikely(skb->no_fcs)) - cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS)); tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
i++; @@@ -6856,7 -6891,7 +6891,7 @@@ static int ixgbe_set_features(struct ne
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@@ -6893,7 -6928,7 +6928,7 @@@
static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr) + const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err = -EOPNOTSUPP; @@@ -7136,11 -7171,6 +7171,6 @@@ static int __devinit ixgbe_probe(struc goto err_ioremap; }
- for (i = 1; i <= 5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - } - netdev->netdev_ops = &ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; @@@ -7419,6 -7449,10 +7449,10 @@@ e_err(probe, "failed to allocate sysfs resources\n"); #endif /* CONFIG_IXGBE_HWMON */
+ #ifdef CONFIG_DEBUG_FS + ixgbe_dbg_adapter_init(adapter); + #endif /* CONFIG_DEBUG_FS */ + return 0;
err_register: @@@ -7453,6 -7487,10 +7487,10 @@@ static void __devexit ixgbe_remove(stru struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev;
+ #ifdef CONFIG_DEBUG_FS + ixgbe_dbg_adapter_exit(adapter); + #endif /*CONFIG_DEBUG_FS */ + set_bit(__IXGBE_DOWN, &adapter->state); cancel_work_sync(&adapter->service_task);
@@@ -7527,7 -7565,7 +7565,7 @@@ static pci_ers_result_t ixgbe_io_error_ goto skip_bad_vf_detection;
bdev = pdev->bus->self; - while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) bdev = bdev->bus->self;
if (!bdev) @@@ -7677,7 -7715,7 +7715,7 @@@ static void ixgbe_io_resume(struct pci_ netif_device_attach(netdev); }
-static struct pci_error_handlers ixgbe_err_handler = { +static const struct pci_error_handlers ixgbe_err_handler = { .error_detected = ixgbe_io_error_detected, .slot_reset = ixgbe_io_slot_reset, .resume = ixgbe_io_resume, @@@ -7708,6 -7746,10 +7746,10 @@@ static int __init ixgbe_init_module(voi pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright);
+ #ifdef CONFIG_DEBUG_FS + ixgbe_dbg_init(); + #endif /* CONFIG_DEBUG_FS */ + #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif @@@ -7730,6 -7772,11 +7772,11 @@@ static void __exit ixgbe_exit_module(vo dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); + + #ifdef CONFIG_DEBUG_FS + ixgbe_dbg_exit(); + #endif /* CONFIG_DEBUG_FS */ + rcu_barrier(); /* Wait for completion of call_rcu()'s */ }
diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6647383,cf372ee..0ee9bd4 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -263,6 -263,8 +263,8 @@@ cont_loop tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets;
return count < tx_ring->count; } @@@ -272,12 -274,10 +274,10 @@@ * @q_vector: structure containing interrupt and ring information * @skb: packet to send up * @status: hardware indication of status of receive - * @rx_ring: rx descriptor ring (for a specific queue) to setup * @rx_desc: rx descriptor **/ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb, u8 status, - struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { struct ixgbevf_adapter *adapter = q_vector->adapter; @@@ -433,11 -433,21 +433,21 @@@ static bool ixgbevf_clean_rx_irq(struc
if (!(staterr & IXGBE_RXD_STAT_EOP)) { skb->next = next_buffer->skb; - skb->next->prev = skb; + IXGBE_CB(skb->next)->prev = skb; adapter->non_eop_descs++; goto next_desc; }
+ /* we should not be chaining buffers, if we did drop the skb */ + if (IXGBE_CB(skb)->prev) { + do { + struct sk_buff *this = skb; + skb = IXGBE_CB(skb)->prev; + dev_kfree_skb(this); + } while (skb); + goto next_desc; + } + /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { dev_kfree_skb_irq(skb); @@@ -461,7 -471,7 +471,7 @@@ } skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
next_desc: rx_desc->wb.upper.status_error = 0; @@@ -490,6 -500,8 +500,8 @@@ rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes;
return !!budget; } @@@ -716,40 -728,15 +728,15 @@@ static void ixgbevf_set_itr(struct ixgb } }
- static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) + static irqreturn_t ixgbevf_msix_other(int irq, void *data) { struct ixgbevf_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; - u32 msg; - bool got_ack = false; - - if (!hw->mbx.ops.check_for_ack(hw)) - got_ack = true;
- if (!hw->mbx.ops.check_for_msg(hw)) { - hw->mbx.ops.read(hw, &msg, 1); + hw->mac.get_link_status = 1;
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 1)); - - if (msg & IXGBE_VT_MSGTYPE_NACK) - pr_warn("Last Request of type %2.2x to PF Nacked\n", - msg & 0xFF); - /* - * Restore the PFSTS bit in case someone is polling for a - * return message from the PF - */ - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; - } - - /* - * checking for the ack clears the PFACK bit. Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it - */ - if (got_ack) - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@@ -899,10 -886,10 +886,10 @@@ static int ixgbevf_request_msix_irqs(st }
err = request_irq(adapter->msix_entries[vector].vector, - &ixgbevf_msix_mbx, 0, netdev->name, adapter); + &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { hw_dbg(&adapter->hw, - "request_irq for msix_mbx failed: %d\n", err); + "request_irq for msix_other failed: %d\n", err); goto free_queue_irqs; }
@@@ -1057,15 -1044,46 +1044,46 @@@ static void ixgbevf_configure_srrctl(st
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= IXGBEVF_RXBUFFER_2048 >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_ring->rx_buf_len >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); }
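For reference, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 (the field counts 1 KB units): a 3072-byte rx_buf_len encodes as ALIGN(3072, 1024) >> 10 = 3, and the MAXIMUM_ETHERNET_VLAN_SIZE case (1522 bytes) yields ALIGN(1522, 1024) >> 10 = 2, the same value the removed IXGBEVF_RXBUFFER_2048 branch produced, so the simplification does not change what gets programmed.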
+ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i; + u16 rx_buf_len; + + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* PF will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. + */ + if ((hw->mac.type == ixgbe_mac_X540_vf) && + (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else if (max_frame <= IXGBEVF_RXBUFFER_3K) + rx_buf_len = IXGBEVF_RXBUFFER_3K; + else if (max_frame <= IXGBEVF_RXBUFFER_7K) + rx_buf_len = IXGBEVF_RXBUFFER_7K; + else if (max_frame <= IXGBEVF_RXBUFFER_15K) + rx_buf_len = IXGBEVF_RXBUFFER_15K; + else + rx_buf_len = IXGBEVF_MAX_RXBUFFER; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].rx_buf_len = rx_buf_len; + } + /** * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * @adapter: board private structure @@@ -1076,18 -1094,14 +1094,14 @@@ static void ixgbevf_configure_rx(struc { u64 rdba; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, j; u32 rdlen;
/* PSRTYPE must be initialized in 82599 */ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buf_len = ALIGN(max_frame, 1024); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbevf_set_rx_buffer_len(adapter);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); /* Setup the HW Rx Head and Tail Descriptor Pointers and @@@ -1103,7 -1117,6 +1117,6 @@@ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); adapter->rx_ring[i].head = IXGBE_VFRDH(j); adapter->rx_ring[i].tail = IXGBE_VFRDT(j); - adapter->rx_ring[i].rx_buf_len = rx_buf_len;
ixgbevf_configure_srrctl(adapter, j); } @@@ -1113,36 -1126,47 +1126,47 @@@ static int ixgbevf_vlan_rx_add_vid(stru { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int err; + + if (!hw->mac.ops.set_vfta) + return -EOPNOTSUPP;
spin_lock(&adapter->mbx_lock);
/* add VID to filter table */ - if (hw->mac.ops.set_vfta) - hw->mac.ops.set_vfta(hw, vid, 0, true); + err = hw->mac.ops.set_vfta(hw, vid, 0, true);
spin_unlock(&adapter->mbx_lock);
+ /* translate error return types so error makes sense */ + if (err == IXGBE_ERR_MBX) + return -EIO; + + if (err == IXGBE_ERR_INVALID_ARGUMENT) + return -EACCES; + set_bit(vid, adapter->active_vlans);
- return 0; + return err; }
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int err = -EOPNOTSUPP;
spin_lock(&adapter->mbx_lock);
/* remove VID from filter table */ if (hw->mac.ops.set_vfta) - hw->mac.ops.set_vfta(hw, vid, 0, false); + err = hw->mac.ops.set_vfta(hw, vid, 0, false);
spin_unlock(&adapter->mbx_lock);
clear_bit(vid, adapter->active_vlans);
- return 0; + return err; }
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) @@@ -1308,6 -1332,25 +1332,25 @@@ static void ixgbevf_init_last_counter_s adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; }
+ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + int api[] = { ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown }; + int err = 0, idx = 0; + + spin_lock(&adapter->mbx_lock); + + while (api[idx] != ixgbe_mbox_api_unknown) { + err = ixgbevf_negotiate_api_version(hw, api[idx]); + if (!err) + break; + idx++; + } + + spin_unlock(&adapter->mbx_lock); + } + static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@@ -1315,7 -1358,6 +1358,6 @@@ int i, j = 0; int num_rx_rings = adapter->num_rx_queues; u32 txdctl, rxdctl; - u32 msg[2];
for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; @@@ -1356,10 -1398,6 +1398,6 @@@ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); }
- msg[0] = IXGBE_VF_SET_LPE; - msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - hw->mbx.ops.write_posted(hw, msg, 2); - spin_unlock(&adapter->mbx_lock);
clear_bit(__IXGBEVF_DOWN, &adapter->state); @@@ -1371,6 -1409,7 +1409,7 @@@ ixgbevf_save_reset_stats(adapter); ixgbevf_init_last_counter_stats(adapter);
+ hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies); }
@@@ -1378,6 -1417,8 +1417,8 @@@ void ixgbevf_up(struct ixgbevf_adapter { struct ixgbe_hw *hw = &adapter->hw;
+ ixgbevf_negotiate_api(adapter); + ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter); @@@ -1419,7 -1460,7 +1460,7 @@@ static void ixgbevf_clean_rx_ring(struc rx_buffer_info->skb = NULL; do { struct sk_buff *this = skb; - skb = skb->prev; + skb = IXGBE_CB(skb)->prev; dev_kfree_skb(this); } while (skb); } @@@ -1547,8 -1588,6 +1588,6 @@@ void ixgbevf_down(struct ixgbevf_adapte
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; - WARN_ON(in_interrupt());
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) @@@ -1561,10 -1600,8 +1600,8 @@@ * watchdog task will continue to schedule reset tasks until * the PF is up and running. */ - if (!hw->mac.ops.reset_hw(hw)) { - ixgbevf_down(adapter); - ixgbevf_up(adapter); - } + ixgbevf_down(adapter); + ixgbevf_up(adapter);
clear_bit(__IXGBEVF_RESETTING, &adapter->state); } @@@ -1867,6 -1904,22 +1904,22 @@@ err_set_interrupt }
/** + * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ + static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) + { + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbevf_free_q_vectors(adapter); + ixgbevf_reset_interrupt_capability(adapter); + } + + /** * ixgbevf_sw_init - Initialize general software structures * (struct ixgbevf_adapter) * @adapter: board private structure to initialize @@@ -2351,6 -2404,8 +2404,8 @@@ static int ixgbevf_open(struct net_devi } }
+ ixgbevf_negotiate_api(adapter); + /* allocate transmit descriptors */ err = ixgbevf_setup_all_tx_resources(adapter); if (err) @@@ -2860,10 -2915,8 +2915,8 @@@ static int ixgbevf_set_mac(struct net_d static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; - u32 msg[2];
if (adapter->hw.mac.type == ixgbe_mac_X540_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; @@@ -2877,35 -2930,91 +2930,91 @@@ /* must set new MTU before calling down or up */ netdev->mtu = new_mtu;
- if (!netif_running(netdev)) { - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = max_frame; - hw->mbx.ops.write_posted(hw, msg, 2); - } - if (netif_running(netdev)) ixgbevf_reinit_locked(adapter);
return 0; }
- static void ixgbevf_shutdown(struct pci_dev *pdev) + static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); + #ifdef CONFIG_PM + int retval = 0; + #endif
netif_device_detach(netdev);
if (netif_running(netdev)) { + rtnl_lock(); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); + rtnl_unlock(); }
- pci_save_state(pdev); + ixgbevf_clear_interrupt_scheme(adapter);
+ #ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + + #endif pci_disable_device(pdev); + + return 0; + } + + #ifdef CONFIG_PM + static int ixgbevf_resume(struct pci_dev *pdev) + { + struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + rtnl_lock(); + err = ixgbevf_init_interrupt_scheme(adapter); + rtnl_unlock(); + if (err) { + dev_err(&pdev->dev, "Cannot initialize interrupts\n"); + return err; + } + + ixgbevf_reset(adapter); + + if (netif_running(netdev)) { + err = ixgbevf_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; + } + + #endif /* CONFIG_PM */ + static void ixgbevf_shutdown(struct pci_dev *pdev) + { + ixgbevf_suspend(pdev, PMSG_SUSPEND); }
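With the code above, .shutdown is reduced to ixgbevf_suspend(pdev, PMSG_SUSPEND), so shutdown and suspend share one teardown path and the CONFIG_PM-only pci_save_state() is the only divergence. A small sketch of that sharing (hypothetical names; not the legacy PCI PM API itself):

struct sketch_pdev { int enabled; };

/* common teardown used by both paths, as in the hunk above */
static int sketch_suspend(struct sketch_pdev *pdev)
{
	pdev->enabled = 0;	/* detach netdev, free irqs/rings, disable */
	return 0;
}

static void sketch_shutdown(struct sketch_pdev *pdev)
{
	sketch_suspend(pdev);	/* same path; shutdown ignores the result */
}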
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, @@@ -2946,7 -3055,7 +3055,7 @@@ return stats; }
- static const struct net_device_ops ixgbe_netdev_ops = { + static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, @@@ -2962,7 -3071,7 +3071,7 @@@
static void ixgbevf_assign_netdev_ops(struct net_device *dev) { - dev->netdev_ops = &ixgbe_netdev_ops; + dev->netdev_ops = &ixgbevf_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } @@@ -3131,6 -3240,7 +3240,7 @@@ static int __devinit ixgbevf_probe(stru return 0;
err_register: + ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(hw->hw_addr); @@@ -3168,6 -3278,7 +3278,7 @@@ static void __devexit ixgbevf_remove(st if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev);
+ ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->hw.hw_addr); @@@ -3256,7 -3367,7 +3367,7 @@@ static void ixgbevf_io_resume(struct pc }
/* PCI Error Recovery (ERS) */ -static struct pci_error_handlers ixgbevf_err_handler = { +static const struct pci_error_handlers ixgbevf_err_handler = { .error_detected = ixgbevf_io_error_detected, .slot_reset = ixgbevf_io_slot_reset, .resume = ixgbevf_io_resume, @@@ -3267,6 -3378,11 +3378,11 @@@ static struct pci_driver ixgbevf_drive .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = __devexit_p(ixgbevf_remove), + #ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, + #endif .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler }; diff --combined drivers/net/ethernet/realtek/r8169.c index a7cc560,8b4e0a9..e7ff886 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -77,7 -77,7 +77,7 @@@ static const int multicast_filter_limit = 32;
#define MAX_READ_REQUEST_SHIFT 12 - #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ + #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@@ -287,6 -287,8 +287,8 @@@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, @@@ -833,8 -835,15 +835,8 @@@ static void rtl_unlock_work(struct rtl8
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); - } + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); }
struct rtl_cond { @@@ -4732,14 -4741,28 +4734,14 @@@ static void rtl_ephy_init(struct rtl816
static void rtl_disable_clock_request(struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); - ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); - } + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); }
static void rtl_enable_clock_request(struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); - ctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); - } + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); }
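These r8169 hunks replace open-coded pci_pcie_cap()/pci_read_config_word()/pci_write_config_word() sequences with the pcie_capability_*() accessors, which locate the capability and do the read-modify-write internally. A userspace-flavoured sketch of roughly what the clear-and-set variant encapsulates (hypothetical register file, not the PCI core implementation):

#include <stdint.h>

static uint16_t sketch_cap_regs[64];	/* stand-in for PCIe capability space */

static uint16_t sketch_read_word(unsigned int reg)
{
	return sketch_cap_regs[reg];
}

static void sketch_write_word(unsigned int reg, uint16_t val)
{
	sketch_cap_regs[reg] = val;
}

/* one read-modify-write, like pcie_capability_clear_and_set_word() */
static void sketch_clear_and_set_word(unsigned int reg, uint16_t clear, uint16_t set)
{
	uint16_t val = sketch_read_word(reg);

	val &= ~clear;
	val |= set;
	sketch_write_word(reg, val);
}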
#define R8168_CPCMD_QUIRK_MASK (\ @@@ -5384,9 -5407,14 +5386,9 @@@ static void rtl_hw_start_8101(struct ne tp->event_slow &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 || - tp->mac_version == RTL_GIGA_MAC_VER_16) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_NOSNOOP_EN); - } - } + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN);
RTL_W8(Cfg9346, Cfg9346_Unlock);
diff --combined drivers/net/usb/asix_devices.c index 32e31c5,8d5fdf1..1df77f2 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@@ -221,7 -221,8 +221,8 @@@ static int ax88172_bind(struct usbnet * /* Get the MAC address */ ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("read AX_CMD_READ_NODE_ID failed: %d", ret); + netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n", + ret); goto out; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@@ -303,7 -304,7 +304,7 @@@ static int ax88772_reset(struct usbnet
ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); if (ret < 0) { - dbg("Select PHY #1 failed: %d", ret); + netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; }
@@@ -331,13 -332,13 +332,13 @@@
msleep(150); rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x after software reset", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl); ret = asix_write_rx_ctl(dev, 0x0000); if (ret < 0) goto out;
rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
ret = asix_sw_reset(dev, AX_SWRESET_PRL); if (ret < 0) @@@ -364,7 -365,7 +365,7 @@@ AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, AX88772_IPG2_DEFAULT, 0, NULL); if (ret < 0) { - dbg("Write IPG,IPG1,IPG2 failed: %d", ret); + netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret); goto out; }
@@@ -381,10 -382,13 +382,13 @@@ goto out;
rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x after all initializations", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n", + rx_ctl);
rx_ctl = asix_read_medium_status(dev); - dbg("Medium Status is 0x%04x after all initializations", rx_ctl); + netdev_dbg(dev->net, + "Medium Status is 0x%04x after all initializations\n", + rx_ctl);
return 0;
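Every bare dbg() in the asix hunks becomes netdev_dbg(dev->net, ...), which ties the message to a specific interface and adds the previously missing trailing newline. A hedged sketch of that kind of per-device logging wrapper (hypothetical, not the kernel macro):

#include <stdarg.h>
#include <stdio.h>

struct sketch_net { const char *name; };

/* prefix each debug line with the device name, netdev_dbg() style */
static void sketch_netdev_dbg(const struct sketch_net *net, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%s: ", net->name);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}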
@@@ -416,7 -420,7 +420,7 @@@ static int ax88772_bind(struct usbnet * /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("Failed to read MAC address: %d", ret); + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@@ -439,7 -443,7 +443,7 @@@ /* Reset the PHY to normal operation mode */ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); if (ret < 0) { - dbg("Select PHY #1 failed: %d", ret); + netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); return ret; }
@@@ -459,7 -463,7 +463,7 @@@
/* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { @@@ -575,13 -579,13 +579,13 @@@ static int ax88178_reset(struct usbnet u32 phyid;
asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); - dbg("GPIO Status: 0x%04x", status); + netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL); asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom); asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
- dbg("EEPROM index 0x17 is 0x%04x", eeprom); + netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
if (eeprom == cpu_to_le16(0xffff)) { data->phymode = PHY_MODE_MARVELL; @@@ -592,7 -596,7 +596,7 @@@ data->ledmode = le16_to_cpu(eeprom) >> 8; gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; } - dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); + netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
/* Power up external GigaPHY through AX88178 GPIO pin */ asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); @@@ -601,14 -605,14 +605,14 @@@ asix_write_gpio(dev, 0x001c, 300); asix_write_gpio(dev, 0x003c, 30); } else { - dbg("gpio phymode == 1 path"); + netdev_dbg(dev->net, "gpio phymode == 1 path\n"); asix_write_gpio(dev, AX_GPIO_GPO1EN, 30); asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); }
/* Read PHYID register *AFTER* powering up PHY */ phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */ asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL); @@@ -770,7 -774,7 +774,7 @@@ static int ax88178_bind(struct usbnet * /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("Failed to read MAC address: %d", ret); + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@@ -962,10 -966,6 +966,10 @@@ static const struct usb_device_id produ USB_DEVICE (0x2001, 0x3c05), .driver_info = (unsigned long) &ax88772_info, }, { + // DLink DUB-E100 H/W Ver C1 + USB_DEVICE (0x2001, 0x1a02), + .driver_info = (unsigned long) &ax88772_info, +}, { // Linksys USB1000 USB_DEVICE (0x1737, 0x0039), .driver_info = (unsigned long) &ax88178_info, diff --combined drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index d066f25,b5659cb..884f9f0 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@@ -138,7 -138,8 +138,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -713,7 -714,8 +714,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -1289,7 -1291,8 +1291,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -1865,7 -1868,8 +1868,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -2440,7 -2444,8 +2444,8 @@@ static const struct ar9300_eeprom ar930 }, .base_ext1 = { .ant_div_control = 0, - .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + .future = {0, 0, 0}, + .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), @@@ -2982,10 -2987,6 +2987,10 @@@ static u32 ath9k_hw_ar9300_get_eeprom(s case EEP_RX_MASK: return pBase->txrxMask & 0xf; case EEP_PAPRD: + if (AR_SREV_9462(ah)) + return false; + if (!ah->config.enable_paprd) + return false; return !!(pBase->featureEnable & BIT(5)); case EEP_CHAIN_MASK_REDUCE: return (pBase->miscConfiguration >> 0x3) & 0x1; @@@ -3524,7 -3525,7 +3529,7 @@@ static void ar9003_hw_xpa_bias_level_ap
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah)) REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); - else if (AR_SREV_9462(ah) || AR_SREV_9550(ah)) + else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah)) REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); else { REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); @@@ -3572,7 -3573,7 +3577,7 @@@ static void ar9003_hw_ant_ctrl_apply(st
u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
- if (AR_SREV_9462(ah)) { + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_AR9462_ALL, value); } else if (AR_SREV_9550(ah)) { @@@ -3616,7 -3617,7 +3621,7 @@@ } }
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) { + if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) { value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1); /* * main_lnaconf, alt_lnaconf, main_tb, alt_tb @@@ -3626,19 -3627,16 +3631,16 @@@ regval &= (~AR_ANT_DIV_CTRL_ALL); regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S; /* enable_lnadiv */ - regval &= (~AR_PHY_9485_ANT_DIV_LNADIV); - regval |= ((value >> 6) & 0x1) << - AR_PHY_9485_ANT_DIV_LNADIV_S; + regval &= (~AR_PHY_ANT_DIV_LNADIV); + regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S; REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
/*enable fast_div */ regval = REG_READ(ah, AR_PHY_CCK_DETECT); regval &= (~AR_FAST_DIV_ENABLE); - regval |= ((value >> 7) & 0x1) << - AR_FAST_DIV_ENABLE_S; + regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S; REG_WRITE(ah, AR_PHY_CCK_DETECT, regval); - ant_div_ctl1 = - ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); + ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); /* check whether antenna diversity is enabled */ if ((ant_div_ctl1 >> 0x6) == 0x3) { regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL); @@@ -3646,15 -3644,15 +3648,15 @@@ * clear bits 25-30 main_lnaconf, alt_lnaconf, * main_tb, alt_tb */ - regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF | - AR_PHY_9485_ANT_DIV_ALT_LNACONF | - AR_PHY_9485_ANT_DIV_ALT_GAINTB | - AR_PHY_9485_ANT_DIV_MAIN_GAINTB)); + regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF | + AR_PHY_ANT_DIV_ALT_LNACONF | + AR_PHY_ANT_DIV_ALT_GAINTB | + AR_PHY_ANT_DIV_MAIN_GAINTB)); /* by default use LNA1 for the main antenna */ - regval |= (AR_PHY_9485_ANT_DIV_LNA1 << - AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S); - regval |= (AR_PHY_9485_ANT_DIV_LNA2 << - AR_PHY_9485_ANT_DIV_ALT_LNACONF_S); + regval |= (AR_PHY_ANT_DIV_LNA1 << + AR_PHY_ANT_DIV_MAIN_LNACONF_S); + regval |= (AR_PHY_ANT_DIV_LNA2 << + AR_PHY_ANT_DIV_ALT_LNACONF_S); REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval); }
@@@ -3847,7 -3845,7 +3849,7 @@@ void ar9003_hw_internal_regulator_apply REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) return; - } else if (AR_SREV_9462(ah)) { + } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { reg_val = le32_to_cpu(pBase->swreg); REG_WRITE(ah, AR_PHY_PMU1, reg_val); } else { @@@ -3878,7 -3876,7 +3880,7 @@@ while (!REG_READ_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM)) udelay(10); - } else if (AR_SREV_9462(ah)) + } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1); else { reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) | @@@ -3981,6 -3979,62 +3983,62 @@@ static void ar9003_hw_xlna_bias_strengt bias & 0x3); }
+ static int ar9003_hw_get_thermometer(struct ath_hw *ah) + { + struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader; + int thermometer = (pBase->miscConfiguration >> 1) & 0x3; + + return --thermometer; + } + + static void ar9003_hw_thermometer_apply(struct ath_hw *ah) + { + int thermometer = ar9003_hw_get_thermometer(ah); + u8 therm_on = (thermometer < 0) ? 0 : 1; + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); + if (ah->caps.tx_chainmask & BIT(1)) + REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); + if (ah->caps.tx_chainmask & BIT(2)) + REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on); + + therm_on = (thermometer < 0) ? 0 : (thermometer == 0); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); + if (ah->caps.tx_chainmask & BIT(1)) { + therm_on = (thermometer < 0) ? 0 : (thermometer == 1); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); + } + if (ah->caps.tx_chainmask & BIT(2)) { + therm_on = (thermometer < 0) ? 0 : (thermometer == 2); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, + AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on); + } + } + + static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) + { + u32 data, ko, kg; + + if (!AR_SREV_9462_20(ah)) + return; + ar9300_otp_read_word(ah, 1, &data); + ko = data & 0xff; + kg = (data >> 8) & 0xff; + if (ko || kg) { + REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3, + AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko); + REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3, + AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN, + kg + 256); + } + } + static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, struct ath9k_channel *chan) { @@@ -3996,6 -4050,8 +4054,8 @@@ ar9003_hw_internal_regulator_apply(ah); ar9003_hw_apply_tuning_caps(ah); ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); + ar9003_hw_thermometer_apply(ah); + ar9003_hw_thermo_cal_apply(ah); }
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, @@@ -4532,7 -4588,7 +4592,7 @@@ static int ar9003_hw_power_control_over { int tempSlope = 0; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - int f[3], t[3]; + int f[8], t[8], i;
REG_RMW(ah, AR_PHY_TPC_11_B0, (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), @@@ -4565,7 -4621,14 +4625,14 @@@ */ if (frequency < 4000) tempSlope = eep->modalHeader2G.tempSlope; - else if (eep->base_ext2.tempSlopeLow != 0) { + else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) { + for (i = 0; i < 8; i++) { + t[i] = eep->base_ext1.tempslopextension[i]; + f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0); + } + tempSlope = ar9003_hw_power_interpolate((s32) frequency, + f, t, 8); + } else if (eep->base_ext2.tempSlopeLow != 0) { t[0] = eep->base_ext2.tempSlopeLow; f[0] = 5180; t[1] = eep->modalHeader5G.tempSlope; @@@ -4905,90 -4968,79 +4972,79 @@@ static void ar9003_hw_set_power_per_rat i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], chan->channel);
- /* - * compare test group from regulatory - * channel list with test mode from pCtlMode - * list - */ - if ((((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ctlIndex[i]) || - (((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ((ctlIndex[i] & CTL_MODE_M) | - SD_NO_CTL))) { - twiceMinEdgePower = - ar9003_hw_get_max_edge_power(pEepData, - freq, i, - is2ghz); - - if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) - /* - * Find the minimum of all CTL - * edge powers that apply to - * this channel - */ - twiceMaxEdgePower = - min(twiceMaxEdgePower, - twiceMinEdgePower); - else { - /* specific */ - twiceMaxEdgePower = - twiceMinEdgePower; - break; - } + /* + * compare test group from regulatory + * channel list with test mode from pCtlMode + * list + */ + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ctlIndex[i]) || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((ctlIndex[i] & CTL_MODE_M) | + SD_NO_CTL))) { + twiceMinEdgePower = + ar9003_hw_get_max_edge_power(pEepData, + freq, i, + is2ghz); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) + /* + * Find the minimum of all CTL + * edge powers that apply to + * this channel + */ + twiceMaxEdgePower = + min(twiceMaxEdgePower, + twiceMinEdgePower); + else { + /* specific */ + twiceMaxEdgePower = twiceMinEdgePower; + break; } } + }
- minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_dbg(common, REGULATORY, - "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", - ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, - scaledPower, minCtlPower); - - /* Apply ctl mode to correct target power set */ - switch (pCtlMode[ctlMode]) { - case CTL_11B: - for (i = ALL_TARGET_LEGACY_1L_5L; - i <= ALL_TARGET_LEGACY_11S; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - case CTL_11A: - case CTL_11G: - for (i = ALL_TARGET_LEGACY_6_24; - i <= ALL_TARGET_LEGACY_54; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - case CTL_5GHT20: - case CTL_2GHT20: - for (i = ALL_TARGET_HT20_0_8_16; - i <= ALL_TARGET_HT20_21; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - pPwrArray[ALL_TARGET_HT20_22] = - (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], - minCtlPower); - pPwrArray[ALL_TARGET_HT20_23] = - (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], - minCtlPower); - break; - case CTL_5GHT40: - case CTL_2GHT40: - for (i = ALL_TARGET_HT40_0_8_16; - i <= ALL_TARGET_HT40_23; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - default: - break; - } + ath_dbg(common, REGULATORY, + "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, + scaledPower, minCtlPower); + + /* Apply ctl mode to correct target power set */ + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = ALL_TARGET_LEGACY_1L_5L; + i <= ALL_TARGET_LEGACY_11S; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_11A: + case CTL_11G: + for (i = ALL_TARGET_LEGACY_6_24; + i <= ALL_TARGET_LEGACY_54; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = ALL_TARGET_HT20_0_8_16; + i <= ALL_TARGET_HT20_23; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = ALL_TARGET_HT40_0_8_16; + i <= ALL_TARGET_HT40_23; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + default: + break; + } } /* end ctl mode checking */ }
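The reindented block above clamps each per-rate target power to minCtlPower via min(); note the HT20 special cases for rates 22 and 23 fold into the main loop once its bound becomes ALL_TARGET_HT20_23. A small sketch of the clamp itself (hypothetical names):

#include <stdint.h>

/* clamp a contiguous range of target powers to a regulatory ceiling */
static void sketch_clamp_powers(uint8_t *pwr, int first, int last, uint8_t cap)
{
	int i;

	for (i = first; i <= last; i++)
		if (pwr[i] > cap)	/* same result as min(pwr[i], cap) */
			pwr[i] = cap;
}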
diff --combined drivers/net/wireless/ath/ath9k/debug.c index c8ef301,ab3bc85..e1041a6 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@@ -373,6 -373,8 +373,8 @@@ void ath_debug_stat_interrupt(struct at sc->debug.stats.istats.tsfoor++; if (status & ATH9K_INT_MCI) sc->debug.stats.istats.mci++; + if (status & ATH9K_INT_GENTIMER) + sc->debug.stats.istats.gen_timer++; }
static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, @@@ -418,6 -420,7 +420,7 @@@ PR_IS("DTIM", dtim); PR_IS("TSFOOR", tsfoor); PR_IS("MCI", mci); + PR_IS("GENTIMER", gen_timer); PR_IS("TOTAL", total);
len += snprintf(buf + len, mxlen - len, @@@ -1577,8 -1580,6 +1580,8 @@@ int ath9k_init_debug(struct ath_hw *ah sc->debug.debugfs_phy, sc, &fops_tx_chainmask); debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_disable_ani); + debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + &sc->sc_ah->config.enable_paprd); debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_regidx); debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, diff --combined drivers/net/wireless/ath/ath9k/hw.c index 4faf0a3,99cab44..40f57aa --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@@ -355,7 -355,7 +355,7 @@@ static void ath9k_hw_read_revisions(str (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
- if (AR_SREV_9462(ah)) + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) ah->is_pciexpress = true; else ah->is_pciexpress = (val & @@@ -602,6 -602,11 +602,11 @@@ static int __ath9k_hw_init(struct ath_h if (AR_SREV_9462(ah)) ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
+ if (AR_SREV_9565(ah)) { + ah->WARegVal |= AR_WA_BIT22; + REG_WRITE(ah, AR_WA, ah->WARegVal); + } + ath9k_hw_init_defaults(ah); ath9k_hw_init_config(ah);
@@@ -647,6 -652,7 +652,7 @@@ case AR_SREV_VERSION_9340: case AR_SREV_VERSION_9462: case AR_SREV_VERSION_9550: + case AR_SREV_VERSION_9565: break; default: ath_err(common, @@@ -708,7 -714,7 +714,7 @@@ int ath9k_hw_init(struct ath_hw *ah int ret; struct ath_common *common = ath9k_hw_common(ah);
- /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */ + /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */ switch (ah->hw_version.devid) { case AR5416_DEVID_PCI: case AR5416_DEVID_PCIE: @@@ -728,6 -734,7 +734,7 @@@ case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: case AR9485_DEVID_AR1111: + case AR9300_DEVID_AR9565: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) @@@ -800,8 -807,7 +807,7 @@@ static void ath9k_hw_init_pll(struct at { u32 pll;
- if (AR_SREV_9485(ah)) { - + if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) { /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_BB_DPLL2_PLL_PWD, 0x1); @@@ -912,7 -918,8 +918,8 @@@ }
pll = ath9k_hw_compute_pll_control(ah, chan); - + if (AR_SREV_9565(ah)) + pll |= 0x40000; REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || @@@ -2034,7 -2041,7 +2041,7 @@@ static void ath9k_set_power_sleep(struc { REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (AR_SREV_9462(ah)) { + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff); REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff); REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff); @@@ -2401,7 -2408,10 +2408,10 @@@ int ath9k_hw_fill_cap_info(struct ath_h if (eeval & AR5416_OPFLAGS_11G) pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
- if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) + if (AR_SREV_9485(ah) || + AR_SREV_9285(ah) || + AR_SREV_9330(ah) || + AR_SREV_9565(ah)) chip_chainmask = 1; else if (AR_SREV_9462(ah)) chip_chainmask = 3; @@@ -2489,7 -2499,7 +2499,7 @@@
if (AR_SREV_9300_20_OR_LATER(ah)) { pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK; - if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah)) + if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah)) pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; @@@ -2497,6 -2507,10 +2507,6 @@@ pCap->rx_status_len = sizeof(struct ar9003_rxs); pCap->tx_desc_len = sizeof(struct ar9003_txc); pCap->txs_len = sizeof(struct ar9003_txs); - if (!ah->config.paprd_disable && - ah->eep_ops->get_eeprom(ah, EEP_PAPRD) && - !AR_SREV_9462(ah)) - pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; } else { pCap->tx_desc_len = sizeof(struct ath_desc); if (AR_SREV_9280_20(ah)) @@@ -2568,14 -2582,12 +2578,12 @@@ ah->enabled_cals |= TX_IQ_ON_AGC_CAL; }
- if (AR_SREV_9462(ah)) { - + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) pCap->hw_caps |= ATH9K_HW_CAP_MCI;
if (AR_SREV_9462_20(ah)) pCap->hw_caps |= ATH9K_HW_CAP_RTT; - }
@@@ -2741,7 -2753,7 +2749,7 @@@ void ath9k_hw_setrxfilter(struct ath_h
ENABLE_REGWRITE_BUFFER(ah);
- if (AR_SREV_9462(ah)) + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
REG_WRITE(ah, AR_RX_FILTER, bits); @@@ -3038,7 -3050,7 +3046,7 @@@ void ath9k_hw_gen_timer_start(struct at REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, gen_tmr_configuration[timer->index].mode_mask);
- if (AR_SREV_9462(ah)) { + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { /* * Starting from AR9462, each generic timer can select which tsf * to use. But we still follow the old rule, 0 - 7 use tsf and @@@ -3072,6 -3084,16 +3080,16 @@@ void ath9k_hw_gen_timer_stop(struct ath REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, gen_tmr_configuration[timer->index].mode_mask);
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { + /* + * Need to switch back to TSF if it was using TSF2. + */ + if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) { + REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, + (1 << timer->index)); + } + } + /* Disable both trigger and thresh interrupt masks */ REG_CLR_BIT(ah, AR_IMR_S5, (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | @@@ -3153,6 -3175,7 +3171,7 @@@ static struct { AR_SREV_VERSION_9485, "9485" }, { AR_SREV_VERSION_9462, "9462" }, { AR_SREV_VERSION_9550, "9550" }, + { AR_SREV_VERSION_9565, "9565" }, };
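Most of the hw.c changes widen AR_SREV_9462(ah)-only branches to also match the new AR9565, including the generic-timer TSF2 switch-back just above. A sketch of that revision-gating idiom (macro names and version values hypothetical):

#include <stdbool.h>

#define SKETCH_VER_9462 0x0280	/* hypothetical version codes */
#define SKETCH_VER_9565 0x02c0

struct sketch_hw { unsigned int mac_version; };

#define SKETCH_SREV_9462(ah) ((ah)->mac_version == SKETCH_VER_9462)
#define SKETCH_SREV_9565(ah) ((ah)->mac_version == SKETCH_VER_9565)

/* the new chip simply joins the existing 9462-only feature test */
static bool sketch_timer_has_tsf2(const struct sketch_hw *ah)
{
	return SKETCH_SREV_9462(ah) || SKETCH_SREV_9565(ah);
}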
/* For devices with external radios */ diff --combined drivers/net/wireless/ath/ath9k/hw.h index de6968f,0d17ce0..f0798cc --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@@ -50,6 -50,7 +50,7 @@@ #define AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 #define AR9485_DEVID_AR1111 0x0037 + #define AR9300_DEVID_AR9565 0x0036
#define AR5416_AR9100_DEVID 0x000b
@@@ -236,6 -237,7 +237,6 @@@ enum ath9k_hw_caps ATH9K_HW_CAP_LDPC = BIT(6), ATH9K_HW_CAP_FASTCLOCK = BIT(7), ATH9K_HW_CAP_SGI_20 = BIT(8), - ATH9K_HW_CAP_PAPRD = BIT(9), ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10), ATH9K_HW_CAP_2GHZ = BIT(11), ATH9K_HW_CAP_5GHZ = BIT(12), @@@ -286,12 -288,12 +287,12 @@@ struct ath9k_ops_config u8 pcie_clock_req; u32 pcie_waen; u8 analog_shiftreg; - u8 paprd_disable; u32 ofdm_trig_low; u32 ofdm_trig_high; u32 cck_trig_high; u32 cck_trig_low; u32 enable_ani; + u32 enable_paprd; int serialize_regmode; bool rx_intr_mitigation; bool tx_intr_mitigation; diff --combined drivers/net/wireless/ath/ath9k/pci.c index ef11dc6,a8f6126..c4f6980 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@@ -38,6 -38,7 +38,7 @@@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ + { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */ { 0 } };
@@@ -113,32 -114,41 +114,32 @@@ static void ath_pci_aspm_init(struct at struct ath_hw *ah = sc->sc_ah; struct pci_dev *pdev = to_pci_dev(sc->dev); struct pci_dev *parent; - int pos; - u8 aspm; + u16 aspm;
if (!ah->is_pciexpress) return;
- pos = pci_pcie_cap(pdev); - if (!pos) - return; - parent = pdev->bus->self; if (!parent) return;
if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { /* Bluetooth coexistence requires disabling ASPM. */ - pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); - aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); - pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm); + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
/* * Both upstream and downstream PCIe components should * have the same ASPM settings. */ - pos = pci_pcie_cap(parent); - pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); - aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); - pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm); + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
ath_info(common, "Disabling ASPM since BTCOEX is enabled\n"); return; }
- pos = pci_pcie_cap(parent); - pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm); if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { ah->aspm_enabled = true; /* Initialize PCIe PM and SERDES registers. */ diff --combined drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 7c4ee72,e0b313c..372f74e --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@@ -42,6 -42,7 +42,7 @@@
#define DMA_ALIGN_MASK 0x03
+ #define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 @@@ -51,6 -52,7 +52,7 @@@
/* devices we support, null terminated */ static const struct sdio_device_id brcmf_sdmmc_ids[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)}, @@@ -638,8 -640,6 +640,8 @@@ static int brcmf_sdio_pd_probe(struct p
oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq), GFP_KERNEL); + if (!oobirq_entry) + return -ENOMEM; oobirq_entry->irq = res->start; oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK; list_add_tail(&oobirq_entry->list, &oobirq_lh); diff --combined drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index 6f70953,f6b862d..8121dba --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@@ -205,7 -205,8 +205,8 @@@ brcmf_c_show_host_event(struct brcmf_ev BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, { BRCMF_E_IF, "IF"}, { BRCMF_E_RSSI, "RSSI"}, { - BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"} + BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, { + BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"} }; uint event_type, flags, auth_type, datalen; static u32 seqnum_prev; @@@ -350,6 -351,11 +351,11 @@@ brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name); break;
+ case BRCMF_E_ESCAN_RESULT: + brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name); + datalen = 0; + break; + case BRCMF_E_PFN_NET_FOUND: case BRCMF_E_PFN_NET_LOST: case BRCMF_E_PFN_SCAN_COMPLETE: @@@ -764,11 -770,8 +770,11 @@@ static void brcmf_c_arp_offload_set(str { char iovbuf[32]; int retcode; + __le32 arp_mode_le;
- brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); + arp_mode_le = cpu_to_le32(arp_mode); + brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf, + sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); retcode = retcode >= 0 ? 0 : retcode; @@@ -784,11 -787,8 +790,11 @@@ static void brcmf_c_arp_offload_enable( { char iovbuf[32]; int retcode; + __le32 arp_enable_le;
- brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4, + arp_enable_le = cpu_to_le32(arp_enable); + + brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4, iovbuf, sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@@ -806,10 -806,10 +812,10 @@@ int brcmf_c_preinit_dcmds(struct brcmf_ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ char buf[128], *ptr; - u32 roaming = 1; - uint bcn_timeout = 3; - int scan_assoc_time = 40; - int scan_unassoc_time = 40; + __le32 roaming_le = cpu_to_le32(1); + __le32 bcn_timeout_le = cpu_to_le32(3); + __le32 scan_assoc_time_le = cpu_to_le32(40); + __le32 scan_unassoc_time_le = cpu_to_le32(40); int i; struct brcmf_bus_dcmd *cmdlst; struct list_head *cur, *q; @@@ -835,14 -835,14 +841,14 @@@
/* Setup timeout if Beacons are lost and roam is off to report link down */ - brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, + brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
/* Enable/Disable built-in roaming to allow ext supplicant to take care of roaming */ - brcmf_c_mkiovar("roam_off", (char *)&roaming, 4, + brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@@ -854,9 -854,9 +860,9 @@@ sizeof(iovbuf));
brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME, - (char *)&scan_assoc_time, sizeof(scan_assoc_time)); + (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME, - (char *)&scan_unassoc_time, sizeof(scan_unassoc_time)); + (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
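The dhd_common.c hunks convert every 32-bit value handed to the firmware (iovars and dcmds alike) with cpu_to_le32() first, since the dongle expects little-endian regardless of host byte order. A portable sketch of packing such an iovar (helper names hypothetical):

#include <stdint.h>
#include <string.h>

/* produce a value whose in-memory bytes are little-endian, like cpu_to_le32() */
static uint32_t sketch_cpu_to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
			 (v >> 16) & 0xff, (v >> 24) & 0xff };
	uint32_t le;

	memcpy(&le, b, sizeof(le));	/* b[] is LE by construction */
	return le;
}

/* pack "name\0" followed by the LE payload, mkiovar-style */
static size_t sketch_mkiovar(const char *name, uint32_t val, uint8_t *buf)
{
	size_t n = strlen(name) + 1;
	uint32_t le = sketch_cpu_to_le32(val);

	memcpy(buf, name, n);
	memcpy(buf + n, &le, sizeof(le));
	return n + sizeof(le);
}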
/* Set and enable ARP offload feature */ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE); diff --combined drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 50b5553,65cf8f9..32ee052 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@@ -28,6 -28,7 +28,7 @@@ #include <linux/ieee80211.h> #include <linux/uaccess.h> #include <net/cfg80211.h> + #include <net/netlink.h>
#include <brcmu_utils.h> #include <defs.h> @@@ -489,8 -490,8 +490,8 @@@ static void brcmf_set_mpc(struct net_de } }
- static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, - struct brcmf_ssid *ssid) + static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le, + struct brcmf_ssid *ssid) { memcpy(params_le->bssid, ether_bcast, ETH_ALEN); params_le->bss_type = DOT11_BSSTYPE_ANY; @@@ -500,10 -501,8 +501,10 @@@ params_le->active_time = cpu_to_le32(-1); params_le->passive_time = cpu_to_le32(-1); params_le->home_time = cpu_to_le32(-1); - if (ssid && ssid->SSID_len) - memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid)); + if (ssid && ssid->SSID_len) { + params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len); + memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len); + }
static s32 @@@ -546,7 -545,7 +547,7 @@@ brcmf_run_iscan(struct brcmf_cfg80211_i return -ENOMEM; BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
- wl_iscan_prep(&params->params_le, ssid); + brcmf_iscan_prep(&params->params_le, ssid);
params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION); params->action = cpu_to_le16(action); @@@ -599,9 -598,9 +600,9 @@@ static s32 brcmf_do_iscan(struct brcmf_ }
static s32 - __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_scan_request *request, - struct cfg80211_ssid *this_ssid) + brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct cfg80211_ssid *ssids; @@@ -692,11 -691,342 +693,342 @@@ scan_out return err; }
+ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le, + struct cfg80211_scan_request *request) + { + u32 n_ssids; + u32 n_channels; + s32 i; + s32 offset; + __le16 chanspec; + u16 channel; + struct ieee80211_channel *req_channel; + char *ptr; + struct brcmf_ssid ssid; + + memcpy(params_le->bssid, ether_bcast, ETH_ALEN); + params_le->bss_type = DOT11_BSSTYPE_ANY; + params_le->scan_type = 0; + params_le->channel_num = 0; + params_le->nprobes = cpu_to_le32(-1); + params_le->active_time = cpu_to_le32(-1); + params_le->passive_time = cpu_to_le32(-1); + params_le->home_time = cpu_to_le32(-1); + memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le)); + + /* if request is null exit so it will be all channel broadcast scan */ + if (!request) + return; + + n_ssids = request->n_ssids; + n_channels = request->n_channels; + /* Copy channel array if applicable */ + WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels); + if (n_channels > 0) { + for (i = 0; i < n_channels; i++) { + chanspec = 0; + req_channel = request->channels[i]; + channel = ieee80211_frequency_to_channel( + req_channel->center_freq); + if (req_channel->band == IEEE80211_BAND_2GHZ) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + if (req_channel->flags & IEEE80211_CHAN_NO_HT40) { + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + } else { + chanspec |= WL_CHANSPEC_BW_40; + if (req_channel->flags & + IEEE80211_CHAN_NO_HT40PLUS) + chanspec |= WL_CHANSPEC_CTL_SB_LOWER; + else + chanspec |= WL_CHANSPEC_CTL_SB_UPPER; + } + + params_le->channel_list[i] = + (channel & WL_CHANSPEC_CHAN_MASK) | + chanspec; + WL_SCAN("Chan : %d, Channel spec: %x\n", + channel, params_le->channel_list[i]); + params_le->channel_list[i] = + cpu_to_le16(params_le->channel_list[i]); + } + } else { + WL_SCAN("Scanning all channels\n"); + } + /* Copy ssid array if applicable */ + WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids); + if (n_ssids > 0) { + offset = offsetof(struct brcmf_scan_params_le, channel_list) + + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + ptr = (char *)params_le + offset; + for (i = 0; i < n_ssids; i++) { + memset(&ssid, 0, sizeof(ssid)); + ssid.SSID_len = cpu_to_le32(request->ssids[i].ssid_len); + memcpy(ssid.SSID, request->ssids[i].ssid, + request->ssids[i].ssid_len); + if (!ssid.SSID_len) + WL_SCAN("%d: Broadcast scan\n", i); + else + WL_SCAN("%d: scan for %s size =%d\n", i, + ssid.SSID, ssid.SSID_len); + memcpy(ptr, &ssid, sizeof(ssid)); + ptr += sizeof(ssid); + } + } else { + WL_SCAN("Broadcast scan %p\n", request->ssids); + if ((request->ssids) && request->ssids->ssid_len) { + WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID, + request->ssids->ssid_len); + params_le->ssid_le.SSID_len = + cpu_to_le32(request->ssids->ssid_len); + memcpy(&params_le->ssid_le.SSID, request->ssids->ssid, + request->ssids->ssid_len); + } + } + /* Adding mask to channel numbers */ + params_le->channel_num = + cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK)); + } + + static s32 + brcmf_notify_escan_complete(struct brcmf_cfg80211_priv *cfg_priv, + struct net_device *ndev, + bool aborted, bool fw_abort) + { + struct brcmf_scan_params_le params_le; + struct cfg80211_scan_request *scan_request; + s32 err = 0; + + WL_SCAN("Enter\n"); + + /* clear scan request, because the FW abort can cause a second call */ + /* to this function and might cause a double cfg80211_scan_done */ + scan_request = 
cfg_priv->scan_request; + cfg_priv->scan_request = NULL; + + if (timer_pending(&cfg_priv->escan_timeout)) + del_timer_sync(&cfg_priv->escan_timeout); + + if (fw_abort) { + /* Do a scan abort to stop the driver's scan engine */ + WL_SCAN("ABORT scan in firmware\n"); + memset(&params_le, 0, sizeof(params_le)); + memcpy(params_le.bssid, ether_bcast, ETH_ALEN); + params_le.bss_type = DOT11_BSSTYPE_ANY; + params_le.scan_type = 0; + params_le.channel_num = cpu_to_le32(1); + params_le.nprobes = cpu_to_le32(1); + params_le.active_time = cpu_to_le32(-1); + params_le.passive_time = cpu_to_le32(-1); + params_le.home_time = cpu_to_le32(-1); + /* Scan is aborted by setting channel_list[0] to -1 */ + params_le.channel_list[0] = cpu_to_le16(-1); + /* E-Scan (or any other type) can be aborted by SCAN */ + err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le, + sizeof(params_le)); + if (err) + WL_ERR("Scan abort failed\n"); + } + if (scan_request) { + WL_SCAN("ESCAN Completed scan: %s\n", + aborted ? "Aborted" : "Done"); + cfg80211_scan_done(scan_request, aborted); + brcmf_set_mpc(ndev, 1); + } + if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { + WL_ERR("Scan complete while device not scanning\n"); + return -EPERM; + } + + return err; + } + + static s32 + brcmf_run_escan(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, + struct cfg80211_scan_request *request, u16 action) + { + s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + + offsetof(struct brcmf_escan_params_le, params_le); + struct brcmf_escan_params_le *params; + s32 err = 0; + + WL_SCAN("E-SCAN START\n"); + + if (request != NULL) { + /* Allocate space for populating channels in struct */ + params_size += sizeof(u32) * ((request->n_channels + 1) / 2); + + /* Allocate space for populating ssids in struct */ + params_size += sizeof(struct brcmf_ssid) * request->n_ssids; + } + + params = kzalloc(params_size, GFP_KERNEL); + if (!params) { + err = -ENOMEM; + goto exit; + } + BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN); + brcmf_escan_prep(&params->params_le, request); + params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); + params->action = cpu_to_le16(action); + params->sync_id = cpu_to_le16(0x1234); + + err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size, + cfg_priv->escan_ioctl_buf, BRCMF_DCMD_MEDLEN); + if (err) { + if (err == -EBUSY) + WL_INFO("system busy : escan canceled\n"); + else + WL_ERR("error (%d)\n", err); + } + + kfree(params); + exit: + return err; + } + + static s32 + brcmf_do_escan(struct brcmf_cfg80211_priv *cfg_priv, struct wiphy *wiphy, + struct net_device *ndev, struct cfg80211_scan_request *request) + { + s32 err; + __le32 passive_scan; + struct brcmf_scan_results *results; + + WL_SCAN("Enter\n"); + cfg_priv->escan_info.ndev = ndev; + cfg_priv->escan_info.wiphy = wiphy; + cfg_priv->escan_info.escan_state = WL_ESCAN_STATE_SCANNING; + passive_scan = cfg_priv->active_scan ? 
0 : cpu_to_le32(1); + err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan)); + if (err) { + WL_ERR("error (%d)\n", err); + return err; + } + brcmf_set_mpc(ndev, 0); + results = (struct brcmf_scan_results *)cfg_priv->escan_info.escan_buf; + results->version = 0; + results->count = 0; + results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE; + + err = brcmf_run_escan(cfg_priv, ndev, request, WL_ESCAN_ACTION_START); + if (err) + brcmf_set_mpc(ndev, 1); + return err; + } + + static s32 + brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) + { + struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); + struct cfg80211_ssid *ssids; + struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int; + __le32 passive_scan; + bool escan_req; + bool spec_scan; + s32 err; + u32 SSID_len; + + WL_SCAN("START ESCAN\n"); + + if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { + WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status); + return -EAGAIN; + } + if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) { + WL_ERR("Scanning being aborted : status (%lu)\n", + cfg_priv->status); + return -EAGAIN; + } + if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { + WL_ERR("Connecting : status (%lu)\n", + cfg_priv->status); + return -EAGAIN; + } + + /* Arm scan timeout timer */ + mod_timer(&cfg_priv->escan_timeout, jiffies + + WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); + + escan_req = false; + if (request) { + /* scan bss */ + ssids = request->ssids; + escan_req = true; + } else { + /* scan in ibss */ + /* we don't do escan in ibss */ + ssids = this_ssid; + } + + cfg_priv->scan_request = request; + set_bit(WL_STATUS_SCANNING, &cfg_priv->status); + if (escan_req) { + err = brcmf_do_escan(cfg_priv, wiphy, ndev, request); + if (!err) + return err; + else + goto scan_out; + } else { + WL_SCAN("ssid \"%s\", ssid_len (%d)\n", + ssids->ssid, ssids->ssid_len); + memset(&sr->ssid_le, 0, sizeof(sr->ssid_le)); + SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len); + sr->ssid_le.SSID_len = cpu_to_le32(0); + spec_scan = false; + if (SSID_len) { + memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len); + sr->ssid_le.SSID_len = cpu_to_le32(SSID_len); + spec_scan = true; + } else + WL_SCAN("Broadcast scan\n"); + + passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); + err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan)); + if (err) { + WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err); + goto scan_out; + } + brcmf_set_mpc(ndev, 0); + err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le, + sizeof(sr->ssid_le)); + if (err) { + if (err == -EBUSY) + WL_INFO("BUSY: scan for \"%s\" canceled\n", + sr->ssid_le.SSID); + else + WL_ERR("WLC_SCAN error (%d)\n", err); + + brcmf_set_mpc(ndev, 1); + goto scan_out; + } + } + + return 0; + + scan_out: + clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); + if (timer_pending(&cfg_priv->escan_timeout)) + del_timer_sync(&cfg_priv->escan_timeout); + cfg_priv->scan_request = NULL; + return err; + } + static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct net_device *ndev = request->wdev->netdev; + struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); s32 err = 0;
WL_TRACE("Enter\n"); @@@ -704,7 -1034,11 +1036,11 @@@ if (!check_sys_up(wiphy)) return -EIO;
- err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL); + if (cfg_priv->iscan_on) + err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL); + else if (cfg_priv->escan_on) + err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL); + if (err) WL_ERR("scan error (%d)\n", err);
@@@ -2473,6 -2807,175 +2809,175 @@@ static s32 brcmf_init_iscan(struct brcm return err; }
+ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work) + { + struct brcmf_cfg80211_priv *cfg_priv = + container_of(work, struct brcmf_cfg80211_priv, + escan_timeout_work); + + brcmf_notify_escan_complete(cfg_priv, + cfg_priv->escan_info.ndev, true, true); + } + + static void brcmf_escan_timeout(unsigned long data) + { + struct brcmf_cfg80211_priv *cfg_priv = + (struct brcmf_cfg80211_priv *)data; + + if (cfg_priv->scan_request) { + WL_ERR("timer expired\n"); + if (cfg_priv->escan_on) + schedule_work(&cfg_priv->escan_timeout_work); + } + } + + static s32 + brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss, + struct brcmf_bss_info_le *bss_info_le) + { + if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) && + (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) == + CHSPEC_BAND(le16_to_cpu(bss->chanspec))) && + bss_info_le->SSID_len == bss->SSID_len && + !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) { + if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) == + (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) { + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + if (bss_info_le->RSSI > bss->RSSI) + bss->RSSI = bss_info_le->RSSI; + } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) && + (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + bss->RSSI = bss_info_le->RSSI; + bss->flags |= WLC_BSS_RSSI_ON_CHANNEL; + } + return 1; + } + return 0; + } + + static s32 + brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_priv *cfg_priv, + struct net_device *ndev, + const struct brcmf_event_msg *e, void *data) + { + s32 status; + s32 err = 0; + struct brcmf_escan_result_le *escan_result_le; + struct brcmf_bss_info_le *bss_info_le; + struct brcmf_bss_info_le *bss = NULL; + u32 bi_length; + struct brcmf_scan_results *list; + u32 i; + + status = be32_to_cpu(e->status); + + if (!ndev || !cfg_priv->escan_on || + !test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { + WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n", + ndev, cfg_priv->escan_on, + !test_bit(WL_STATUS_SCANNING, &cfg_priv->status)); + return -EPERM; + } + + if (status == BRCMF_E_STATUS_PARTIAL) { + WL_SCAN("ESCAN Partial result\n"); + escan_result_le = (struct brcmf_escan_result_le *) data; + if (!escan_result_le) { + WL_ERR("Invalid escan result (NULL pointer)\n"); + goto exit; + } + if (!cfg_priv->scan_request) { + WL_SCAN("result without cfg80211 request\n"); + goto exit; + } + + if (le16_to_cpu(escan_result_le->bss_count) != 1) { + WL_ERR("Invalid bss_count %d: ignoring\n", + escan_result_le->bss_count); + goto exit; + } + bss_info_le = &escan_result_le->bss_info_le; + + bi_length = le32_to_cpu(bss_info_le->length); + if (bi_length != (le32_to_cpu(escan_result_le->buflen) - + WL_ESCAN_RESULTS_FIXED_SIZE)) { + WL_ERR("Invalid bss_info length %d: ignoring\n", + bi_length); + goto exit; + } + + if (!(cfg_to_wiphy(cfg_priv)->interface_modes & + BIT(NL80211_IFTYPE_ADHOC))) { + if (le16_to_cpu(bss_info_le->capability) & + WLAN_CAPABILITY_IBSS) { + WL_ERR("Ignoring IBSS result\n"); + goto exit; + } + } + + list = (struct brcmf_scan_results *) + cfg_priv->escan_info.escan_buf; + if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) { + WL_ERR("Buffer is too small: ignoring\n"); + goto exit; + } + + for (i = 0; i < list->count; i++) { + bss = bss ? 
(struct brcmf_bss_info_le *) + ((unsigned char *)bss + + le32_to_cpu(bss->length)) : list->bss_info_le; + if (brcmf_compare_update_same_bss(bss, bss_info_le)) + goto exit; + } + memcpy(&(cfg_priv->escan_info.escan_buf[list->buflen]), + bss_info_le, bi_length); + list->version = le32_to_cpu(bss_info_le->version); + list->buflen += bi_length; + list->count++; + } else { + cfg_priv->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (cfg_priv->scan_request) { + cfg_priv->bss_list = (struct brcmf_scan_results *) + cfg_priv->escan_info.escan_buf; + brcmf_inform_bss(cfg_priv); + if (status == BRCMF_E_STATUS_SUCCESS) { + WL_SCAN("ESCAN Completed\n"); + brcmf_notify_escan_complete(cfg_priv, ndev, + false, false); + } else { + WL_ERR("ESCAN Aborted, Event 0x%x\n", status); + brcmf_notify_escan_complete(cfg_priv, ndev, + true, false); + } + brcmf_set_mpc(ndev, 1); + } else + WL_ERR("Unexpected scan result 0x%x\n", status); + } + exit: + return err; + } + + static void brcmf_init_escan(struct brcmf_cfg80211_priv *cfg_priv) + { + + if (cfg_priv->escan_on) { + cfg_priv->el.handler[BRCMF_E_ESCAN_RESULT] = + brcmf_cfg80211_escan_handler; + cfg_priv->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + /* Init scan_timeout timer */ + init_timer(&cfg_priv->escan_timeout); + cfg_priv->escan_timeout.data = (unsigned long) cfg_priv; + cfg_priv->escan_timeout.function = brcmf_escan_timeout; + INIT_WORK(&cfg_priv->escan_timeout_work, + brcmf_cfg80211_escan_timeout_worker); + } + } + static __always_inline void brcmf_delay(u32 ms) { if (ms < 1000 / HZ) { @@@ -2548,10 -3051,8 +3053,8 @@@ static s32 brcmf_cfg80211_suspend(struc clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
/* Turn off watchdog timer */ - if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { - WL_INFO("Enable MPC\n"); + if (test_bit(WL_STATUS_READY, &cfg_priv->status)) brcmf_set_mpc(ndev, 1); - }
WL_TRACE("Exit\n");
@@@ -2726,6 -3227,25 +3229,25 @@@ brcmf_cfg80211_flush_pmksa(struct wiph
}
+ #ifdef CONFIG_NL80211_TESTMODE + static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len) + { + struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_priv->wdev->netdev; + struct brcmf_dcmd *dcmd = data; + struct sk_buff *reply; + int ret; + + ret = brcmf_netlink_dcmd(ndev, dcmd); + if (ret == 0) { + reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd)); + if (!reply) + return -ENOMEM; + nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd); + ret = cfg80211_testmode_reply(reply); + } + return ret; + } + #endif + static struct cfg80211_ops wl_cfg80211_ops = { .change_virtual_intf = brcmf_cfg80211_change_iface, .scan = brcmf_cfg80211_scan, @@@ -2748,7 -3268,10 +3270,10 @@@ .resume = brcmf_cfg80211_resume, .set_pmksa = brcmf_cfg80211_set_pmksa, .del_pmksa = brcmf_cfg80211_del_pmksa, - .flush_pmksa = brcmf_cfg80211_flush_pmksa + .flush_pmksa = brcmf_cfg80211_flush_pmksa, + #ifdef CONFIG_NL80211_TESTMODE + .testmode_cmd = brcmf_cfg80211_testmode + #endif };
static s32 brcmf_mode_to_nl80211_iftype(s32 mode) @@@ -3173,10 -3696,8 +3698,8 @@@ brcmf_notify_scan_status(struct brcmf_c cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count);
err = brcmf_inform_bss(cfg_priv); - if (err) { + if (err) scan_abort = true; - goto scan_done_out; - }
scan_done_out: if (cfg_priv->scan_request) { @@@ -3223,6 -3744,8 +3746,8 @@@ static void brcmf_deinit_priv_mem(struc cfg_priv->profile = NULL; kfree(cfg_priv->scan_req_int); cfg_priv->scan_req_int = NULL; + kfree(cfg_priv->escan_ioctl_buf); + cfg_priv->escan_ioctl_buf = NULL; kfree(cfg_priv->dcmd_buf); cfg_priv->dcmd_buf = NULL; kfree(cfg_priv->extra_buf); @@@ -3251,6 -3774,9 +3776,9 @@@ static s32 brcmf_init_priv_mem(struct b GFP_KERNEL); if (!cfg_priv->scan_req_int) goto init_priv_mem_out; + cfg_priv->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); + if (!cfg_priv->escan_ioctl_buf) + goto init_priv_mem_out; cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL); if (!cfg_priv->dcmd_buf) goto init_priv_mem_out; @@@ -3300,18 -3826,28 +3828,28 @@@ static struct brcmf_cfg80211_event_q *b
static s32 brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event, - const struct brcmf_event_msg *msg) + const struct brcmf_event_msg *msg, void *data) { struct brcmf_cfg80211_event_q *e; s32 err = 0; ulong flags; + u32 data_len; + u32 total_len;
- e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC); + total_len = sizeof(struct brcmf_cfg80211_event_q); + if (data) + data_len = be32_to_cpu(msg->datalen); + else + data_len = 0; + total_len += data_len; + e = kzalloc(total_len, GFP_ATOMIC); if (!e) return -ENOMEM;
e->etype = event; memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg)); + if (data) + memcpy(&e->edata, data, data_len);
spin_lock_irqsave(&cfg_priv->evt_q_lock, flags); list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list); @@@ -3377,8 -3913,17 +3915,17 @@@ static s32 wl_init_priv(struct brcmf_cf
cfg_priv->scan_request = NULL; cfg_priv->pwr_save = true; + #ifdef CONFIG_BRCMISCAN cfg_priv->iscan_on = true; /* iscan on & off switch. we enable iscan per default */ + cfg_priv->escan_on = false; /* escan on & off switch. + we disable escan per default */ + #else + cfg_priv->iscan_on = false; /* iscan on & off switch. + we disable iscan per default */ + cfg_priv->escan_on = true; /* escan on & off switch. + we enable escan per default */ + #endif cfg_priv->roam_on = true; /* roam on & off switch. we enable roam per default */
@@@ -3396,6 -3941,7 +3943,7 @@@ err = brcmf_init_iscan(cfg_priv); if (err) return err; + brcmf_init_escan(cfg_priv); brcmf_init_conf(cfg_priv->conf); brcmf_init_prof(cfg_priv->profile); brcmf_link_down(cfg_priv); @@@ -3480,7 -4026,7 +4028,7 @@@ brcmf_cfg80211_event(struct net_device u32 event_type = be32_to_cpu(e->event_type); struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
- if (!brcmf_enq_event(cfg_priv, event_type, e)) + if (!brcmf_enq_event(cfg_priv, event_type, e, data)) schedule_work(&cfg_priv->event_work); }
@@@ -3554,6 -4100,7 +4102,7 @@@ static s32 brcmf_dongle_eventmsg(struc setbit(eventmask, BRCMF_E_TXFAIL); setbit(eventmask, BRCMF_E_JOIN_START); setbit(eventmask, BRCMF_E_SCAN_COMPLETE); + setbit(eventmask, BRCMF_E_ESCAN_RESULT);
brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); diff --combined drivers/net/wireless/iwlegacy/common.h index 7246826,3d3135e..b4bb813 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h @@@ -1832,8 -1832,10 +1832,8 @@@ int il_enqueue_hcmd(struct il_priv *il static inline u16 il_pcie_link_ctl(struct il_priv *il) { - int pos; u16 pci_lnk_ctl; - pos = pci_pcie_cap(il->pci_dev); - pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl); return pci_lnk_ctl; }
@@@ -1843,8 -1845,6 +1843,6 @@@ __le32 il_add_beacon_time(struct il_pri u32 beacon_interval);
#ifdef CONFIG_PM - int il_pci_suspend(struct device *device); - int il_pci_resume(struct device *device); extern const struct dev_pm_ops il_pm_ops;
#define IL_LEGACY_PM_OPS (&il_pm_ops) diff --combined drivers/net/wireless/iwlwifi/pcie/trans.c index 063ecaf,8488511..f386725 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@@ -492,10 -492,11 +492,11 @@@ static void iwl_tx_queue_free(struct iw iwl_tx_queue_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) - for (i = 0; i < txq->q.n_window; i++) + for (i = 0; i < txq->q.n_window; i++) { kfree(txq->entries[i].cmd); + kfree(txq->entries[i].copy_cmd); + }
/* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) { @@@ -675,10 -676,13 +676,10 @@@ static void iwl_set_pwr_vmain(struct iw static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int pos; u16 pci_lnk_ctl;
- struct pci_dev *pci_dev = trans_pcie->pci_dev; - - pos = pci_pcie_cap(pci_dev); - pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, + &pci_lnk_ctl); return pci_lnk_ctl; }
@@@ -893,6 -897,7 +894,7 @@@ static int iwl_set_hw_ready(struct iwl_ static int iwl_prepare_card_hw(struct iwl_trans *trans) { int ret; + int t = 0;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@@ -905,17 -910,15 +907,15 @@@ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + do { + ret = iwl_set_hw_ready(trans); + if (ret >= 0) + return 0;
- if (ret < 0) - return ret; + usleep_range(200, 1000); + t += 200; + } while (t < 150000);
- /* HW should be ready by now, check again. */ - ret = iwl_set_hw_ready(trans); - if (ret >= 0) - return 0; return ret; }
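The rewritten prepare step trades one long iwl_poll_bit() wait for an explicit loop: try iwl_set_hw_ready(), sleep 200-1000 us, give up once roughly 150 ms of budget is spent. A minimal sketch of the same bounded-poll idiom, with a hypothetical helper name and the real usleep_range() from <linux/delay.h>:

static int poll_until_ready(struct iwl_trans *trans)
{
	int waited_us = 0;

	do {
		if (iwl_set_hw_ready(trans) >= 0)
			return 0;		/* NIC signalled ready */

		usleep_range(200, 1000);	/* sleeps, never busy-waits */
		waited_us += 200;		/* count the minimum slept */
	} while (waited_us < 150000);		/* ~150 ms budget */

	return -ETIMEDOUT;
}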
@@@ -1439,7 -1442,6 +1439,7 @@@ static int iwl_trans_pcie_start_hw(stru return err;
err_free_irq: + trans_pcie->irq_requested = false; free_irq(trans_pcie->irq, trans); error: iwl_free_isr_ict(trans); @@@ -1769,7 -1771,7 +1769,7 @@@ void iwl_dump_csr(struct iwl_trans *tra #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ if (!debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops)) \ - return -ENOMEM; \ + goto err; \ } while (0)
/* file operation */ @@@ -2033,6 -2035,10 +2033,10 @@@ static int iwl_trans_pcie_dbgfs_registe DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); return 0; + + err: + IWL_ERR(trans, "failed to create the trans debugfs entry\n"); + return -ENOMEM; } #else static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, diff --combined drivers/net/wireless/rtlwifi/pci.c index 5983631,aad9d44..abc306b --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@@ -372,11 -372,13 +372,11 @@@ static void rtl_pci_parse_configuration struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 tmp; - int pos; - u8 linkctrl_reg; + u16 linkctrl_reg;
/*Link Control Register */ - pos = pci_pcie_cap(pdev); - pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg); - pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg; + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg); + pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n", pcipriv->ndis_adapter.linkctrl_reg); @@@ -502,7 -504,7 +502,7 @@@ static void _rtl_pci_tx_chk_waitq(struc _rtl_update_earlymode_info(hw, skb, &tcb_desc, tid);
- rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); } } } @@@ -927,7 -929,7 +927,7 @@@ static void _rtl_pci_prepare_bcn_taskle info = IEEE80211_SKB_CB(pskb); pdesc = &ring->desc[0]; rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, - info, pskb, BEACON_QUEUE, &tcb_desc); + info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
__skb_queue_tail(&ring->queue, pskb);
@@@ -1303,11 -1305,10 +1303,10 @@@ int rtl_pci_reset_trx_ring(struct ieee8 }
static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; struct rtl_sta_info *sta_entry = NULL; u8 tid = rtl_get_tid(skb);
@@@ -1335,13 -1336,14 +1334,14 @@@ return true; }
- static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, - struct rtl_tcb_desc *ptcb_desc) + static int rtl_pci_tx(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_sta_info *sta_entry = NULL; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; u8 idx; @@@ -1416,7 -1418,7 +1416,7 @@@ rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, - info, skb, hw_queue, ptcb_desc); + info, sta, skb, hw_queue, ptcb_desc);
__skb_queue_tail(&ring->queue, skb);
diff --combined drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index dd4bb09,cc89582..86d73b3 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@@ -994,16 -994,8 +994,16 @@@ static enum version_8192c _rtl92ce_read version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C : VERSION_A_CHIP_88C; } else { - version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C : - VERSION_B_CHIP_88C; + version = (enum version_8192c) (CHIP_VER_B | + ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) | + ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); + if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 & + CHIP_VER_RTL_MASK)) { + version = (enum version_8192c)(version | + ((((value32 & CHIP_VER_RTL_MASK) == BIT(12)) + ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) | + CHIP_VENDOR_UMC)); + } }
switch (version) { @@@ -1914,8 -1906,8 +1914,8 @@@ static void rtl92ce_update_hal_rate_mas } RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", ratr_bitmap); - *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | - (ratr_index << 28)); + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", diff --combined drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 7d8f964,60451ee..ea2e1bd --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@@ -162,12 -162,10 +162,12 @@@ int rtl92c_init_sw_vars(struct ieee8021
/* request fw */ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && - !IS_92C_SERIAL(rtlhal->version)) + !IS_92C_SERIAL(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; + pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n"); + }
rtlpriv->max_fw_size = 0x4000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); @@@ -344,7 -342,7 +344,7 @@@ static struct rtl_hal_cfg rtl92ce_hal_c .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, };
- DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { + static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)}, diff --combined drivers/s390/net/qeth_core_main.c index cf6da7f,3af9024..3e25d31 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -489,7 -489,7 +489,7 @@@ static struct qeth_reply *qeth_alloc_re atomic_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); reply->card = card; - }; + } return reply; }
@@@ -1257,7 -1257,30 +1257,30 @@@ static void qeth_clean_channel(struct q kfree(channel->iob[cnt].data); }
- static void qeth_get_channel_path_desc(struct qeth_card *card) + static void qeth_set_single_write_queues(struct qeth_card *card) + { + if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 4)) + qeth_free_qdio_buffers(card); + + card->qdio.no_out_queues = 1; + if (card->qdio.default_out_queue != 0) + dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); + + card->qdio.default_out_queue = 0; + } + + static void qeth_set_multiple_write_queues(struct qeth_card *card) + { + if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 1)) { + qeth_free_qdio_buffers(card); + card->qdio.default_out_queue = 2; + } + card->qdio.no_out_queues = 4; + } + + static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; struct channelPath_dsc { @@@ -1274,38 -1297,23 +1297,23 @@@ QETH_DBF_TEXT(SETUP, 2, "chp_desc");
ccwdev = card->data.ccwdev; - chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); - if (chp_dsc != NULL) { - if (card->info.type != QETH_CARD_TYPE_IQD) { - /* CHPP field bit 6 == 1 -> single queue */ - if ((chp_dsc->chpp & 0x02) == 0x02) { - if ((atomic_read(&card->qdio.state) != - QETH_QDIO_UNINITIALIZED) && - (card->qdio.no_out_queues == 4)) - /* change from 4 to 1 outbound queues */ - qeth_free_qdio_buffers(card); - card->qdio.no_out_queues = 1; - if (card->qdio.default_out_queue != 0) - dev_info(&card->gdev->dev, - "Priority Queueing not supported\n"); - card->qdio.default_out_queue = 0; - } else { - if ((atomic_read(&card->qdio.state) != - QETH_QDIO_UNINITIALIZED) && - (card->qdio.no_out_queues == 1)) { - /* change from 1 to 4 outbound queues */ - qeth_free_qdio_buffers(card); - card->qdio.default_out_queue = 2; - } - card->qdio.no_out_queues = 4; - } - } - card->info.func_level = 0x4100 + chp_dsc->desc; - kfree(chp_dsc); - } + chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); + if (!chp_dsc) + goto out; + + card->info.func_level = 0x4100 + chp_dsc->desc; + if (card->info.type == QETH_CARD_TYPE_IQD) + goto out; + + /* CHPP field bit 6 == 1 -> single queue */ + if ((chp_dsc->chpp & 0x02) == 0x02) + qeth_set_single_write_queues(card); + else + qeth_set_multiple_write_queues(card); + out: + kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); - return; }
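The "CHPP field bit 6" comment uses S/390 bit numbering, which counts from the most-significant bit: bit 0 is 0x80, so bit 6 of the 8-bit chpp byte is mask 0x02. An illustrative one-liner making that mapping explicit:

static inline int chpp_single_queue(u8 chpp)
{
	/* IBM numbering: bit 0 = 0x80 ... bit 6 = 0x02, bit 7 = 0x01 */
	return (chpp & 0x02) == 0x02;
}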
static void qeth_init_qdio_info(struct qeth_card *card) @@@ -1473,7 -1481,7 +1481,7 @@@ static int qeth_determine_card_type(str card->qdio.no_in_queues = 1; card->info.is_multicast_different = known_devices[i][QETH_MULTICAST_IND]; - qeth_get_channel_path_desc(card); + qeth_update_from_chp_desc(card); return 0; } i++; @@@ -2029,7 -2037,7 +2037,7 @@@ int qeth_send_control_data(struct qeth_ if (time_after(jiffies, timeout)) goto time_err; cpu_relax(); - }; + } }
if (reply->rc == -EIO) @@@ -2993,7 -3001,7 +3001,7 @@@ static void qeth_get_trap_id(struct qet struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; struct ccw_dev_id ccwid; - int level, rc; + int level;
tid->chpid = card->info.chpid; ccw_device_get_id(CARD_RDEV(card), &ccwid); @@@ -3001,10 -3009,17 +3009,10 @@@ tid->devno = ccwid.devno; if (!info) return; - - rc = stsi(NULL, 0, 0, 0); - if (rc == -ENOSYS) - level = rc; - else - level = (((unsigned int) rc) >> 28); - - if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS)) + level = stsi(NULL, 0, 0, 0); + if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) tid->lparnr = info222->lpar_number; - - if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) { + if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); } @@@ -4735,7 -4750,7 +4743,7 @@@ int qeth_core_hardsetup_card(struct qet
QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); - qeth_get_channel_path_desc(card); + qeth_update_from_chp_desc(card); retry: if (retries) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", diff --combined fs/namei.c index c5b85b3,a856e7f..c5cd17d --- a/fs/namei.c +++ b/fs/namei.c @@@ -680,7 -680,7 +680,7 @@@ static inline int may_follow_link(struc
/* Allowed if owner and follower match. */ inode = link->dentry->d_inode; - if (current_cred()->fsuid == inode->i_uid) + if (uid_eq(current_cred()->fsuid, inode->i_uid)) return 0;
/* Allowed if parent directory not sticky and world-writable. */ @@@ -689,7 -689,7 +689,7 @@@ return 0;
/* Allowed if parent directory and link owner match. */ - if (parent->i_uid == inode->i_uid) + if (uid_eq(parent->i_uid, inode->i_uid)) return 0;
path_put_conditional(link, nd); @@@ -759,7 -759,7 +759,7 @@@ static int may_linkat(struct path *link /* Source inode owner (or CAP_FOWNER) can hardlink all they like, * otherwise, it must be a safe source. */ - if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) || + if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) || capable(CAP_FOWNER)) return 0;
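These hunks replace raw uid comparisons with uid_eq() because kernel-internal uids are now the opaque kuid_t type, which cannot be compared with ==. A short sketch of the surrounding <linux/uidgid.h> API; the helpers are real, the function is illustrative:

static bool uid_matches(struct user_namespace *ns, uid_t raw,
			kuid_t kernel_uid)
{
	kuid_t k = make_kuid(ns, raw);	/* map a userspace uid into a kuid_t */

	if (!uid_valid(k))		/* raw uid has no mapping in ns */
		return false;

	return uid_eq(k, kernel_uid);	/* the only legal comparison */
}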
@@@ -3971,7 -3971,7 +3971,7 @@@ EXPORT_SYMBOL(user_path_at) EXPORT_SYMBOL(follow_down_one); EXPORT_SYMBOL(follow_down); EXPORT_SYMBOL(follow_up); -EXPORT_SYMBOL(get_write_access); /* binfmt_aout */ +EXPORT_SYMBOL(get_write_access); /* nfsd */ EXPORT_SYMBOL(getname); EXPORT_SYMBOL(lock_rename); EXPORT_SYMBOL(lookup_one_len); diff --combined include/linux/net.h index c8a9708,99276c3..a3831f3 --- a/include/linux/net.h +++ b/include/linux/net.h @@@ -65,7 -65,6 +65,7 @@@ typedef enum struct poll_table_struct; struct pipe_inode_info; struct inode; +struct file; struct net;
#define SOCK_ASYNC_NOSPACE 0 @@@ -247,7 -246,7 +247,8 @@@ extern int sock_sendmsg(struct size_t len); extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); - extern struct file *sock_alloc_file(struct socket *sock, int flags); -extern int sock_map_fd(struct socket *sock, int flags); ++extern struct file *sock_alloc_file(struct socket *sock, int flags, ++ const char *dname); extern struct socket *sockfd_lookup(int fd, int *err); extern struct socket *sock_from_file(struct file *file, int *err); #define sockfd_put(sock) fput(sock->file) diff --combined include/linux/sched.h index 23bddac,a8e2413..439d35a --- a/include/linux/sched.h +++ b/include/linux/sched.h @@@ -954,6 -954,7 +954,6 @@@ struct sched_domain unsigned int smt_gain; int flags; /* See SD_* */ int level; - int idle_buddy; /* cpu assigned to select_idle_sibling() */
/* Runtime fields. */ unsigned long last_balance; /* init to jiffies. units in jiffies */ @@@ -1529,6 -1530,9 +1529,9 @@@ struct task_struct * cache last used pipe for splice */ struct pipe_inode_info *splice_pipe; + + struct page_frag task_frag; + #ifdef CONFIG_TASK_DELAY_ACCT struct task_delay_info *delays; #endif diff --combined include/net/ip6_fib.h index 9fc7114,cd64cf3..8a2a203 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@@ -37,6 -37,7 +37,7 @@@ struct fib6_config int fc_ifindex; u32 fc_flags; u32 fc_protocol; + u32 fc_type; /* only 8 bits are used */
struct in6_addr fc_dst; struct in6_addr fc_src; @@@ -111,8 -112,9 +112,8 @@@ struct rt6_info struct inet6_dev *rt6i_idev; unsigned long _rt6i_peer;
-#ifdef CONFIG_XFRM - u32 rt6i_flow_cache_genid; -#endif + u32 rt6i_genid; + /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len;
diff --combined include/net/net_namespace.h index fd87963,d61e2b3..4faf661 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@@ -15,6 -15,7 +15,7 @@@ #include <net/netns/packet.h> #include <net/netns/ipv4.h> #include <net/netns/ipv6.h> + #include <net/netns/sctp.h> #include <net/netns/dccp.h> #include <net/netns/x_tables.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@@ -66,6 -67,7 +67,7 @@@ struct net struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; /* protected by rtnl_mutex */ + int ifindex;
/* core fib_rules */ struct list_head rules_ops; @@@ -80,6 -82,9 +82,9 @@@ #if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; #endif + #if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE) + struct netns_sctp sctp; + #endif #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) struct netns_dccp dccp; #endif @@@ -88,6 -93,9 +93,9 @@@ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct netns_ct ct; #endif + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) + struct netns_nf_frag nf_frag; + #endif struct sock *nfnl; struct sock *nfnl_stash; #endif @@@ -102,9 -110,15 +110,16 @@@ #endif struct netns_ipvs *ipvs; struct sock *diag_nlsk; + atomic_t rt_genid; };
+ /* + * ifindex generation is per-net namespace, and loopback is + * always the 1st device in ns (see net_dev_init), thus any + * loopback device should get ifindex 1 + */ + + #define LOOPBACK_IFINDEX 1
#include <linux/seq_file_net.h>
@@@ -301,14 -315,5 +316,14 @@@ static inline void unregister_net_sysct } #endif
+static inline int rt_genid(struct net *net) +{ + return atomic_read(&net->rt_genid); +} + +static inline void rt_genid_bump(struct net *net) +{ + atomic_inc(&net->rt_genid); +}
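rt_genid()/rt_genid_bump() implement lazy invalidation: cached routes record the generation number at creation time, and bumping the per-namespace counter makes every older entry compare stale on next use instead of being torn down eagerly. An illustrative check against the rt6i_genid field added earlier in this diff (the helper name is hypothetical):

static bool rt6_is_stale(const struct rt6_info *rt, struct net *net)
{
	return rt->rt6i_genid != rt_genid(net);	/* created before last bump */
}

/* any event that must flush cached routes simply calls rt_genid_bump(net) */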
#endif /* __NET_NET_NAMESPACE_H */ diff --combined include/net/netns/ipv4.h index eb24dbc,7d00583..2ae2b83 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@@ -5,6 -5,7 +5,7 @@@ #ifndef __NETNS_IPV4_H__ #define __NETNS_IPV4_H__
+ #include <linux/uidgid.h> #include <net/inet_frag.h>
struct tcpm_hash_bucket; @@@ -51,8 -52,6 +52,6 @@@ struct netns_ipv4 struct xt_table *iptable_security; #endif struct xt_table *nat_table; - struct hlist_head *nat_bysource; - unsigned int nat_htable_size; #endif
int sysctl_icmp_echo_ignore_all; @@@ -62,9 -61,10 +61,9 @@@ int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr;
- unsigned int sysctl_ping_group_range[2]; + kgid_t sysctl_ping_group_range[2]; long sysctl_tcp_mem[3];
- atomic_t rt_genid; atomic_t dev_addr_genid;
#ifdef CONFIG_IP_MROUTE diff --combined include/net/sock.h index adb7da2,f036493..fce5564 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -247,8 -247,7 +247,7 @@@ struct cg_proto * @sk_stamp: time stamp of last packet received * @sk_socket: Identd and reporting IO signals * @sk_user_data: RPC layer private data - * @sk_sndmsg_page: cached page for sendmsg - * @sk_sndmsg_off: cached offset for sendmsg + * @sk_frag: cached page frag * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules @@@ -362,9 -361,8 +361,8 @@@ struct sock ktime_t sk_stamp; struct socket *sk_socket; void *sk_user_data; - struct page *sk_sndmsg_page; + struct page_frag sk_frag; struct sk_buff *sk_send_head; - __u32 sk_sndmsg_off; __s32 sk_peek_off; int sk_write_pending; #ifdef CONFIG_SECURITY @@@ -606,6 -604,15 +604,15 @@@ static inline void sk_add_bind_node(str #define sk_for_each_bound(__sk, node, list) \ hlist_for_each_entry(__sk, node, list, sk_bind_node)
+ static inline struct user_namespace *sk_user_ns(struct sock *sk) + { + /* Careful only use this in a context where these parameters + * can not change and must all be valid, such as recvmsg from + * userspace. + */ + return sk->sk_socket->file->f_cred->user_ns; + } + /* Sock flags */ enum sock_flags { SOCK_DEAD, @@@ -1332,7 -1339,7 +1339,7 @@@ static inline bool sk_wmem_schedule(str }
static inline bool -sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { if (!sk_has_account(sk)) return true; @@@ -1670,7 -1677,7 +1677,7 @@@ static inline void sock_graft(struct so write_unlock_bh(&sk->sk_callback_lock); }
- extern int sock_i_uid(struct sock *sk); + extern kuid_t sock_i_uid(struct sock *sk); extern unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry * @@@ -2025,18 -2032,23 +2032,23 @@@ static inline void sk_stream_moderate_s
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
- static inline struct page *sk_stream_alloc_page(struct sock *sk) + /** + * sk_page_frag - return an appropriate page_frag + * @sk: socket + * + * If socket allocation mode allows current thread to sleep, it means it's + * safe to use the per task page_frag instead of the per socket one. + */ + static inline struct page_frag *sk_page_frag(struct sock *sk) { - struct page *page = NULL; + if (sk->sk_allocation & __GFP_WAIT) + return &current->task_frag;
- page = alloc_pages(sk->sk_allocation, 0); - if (!page) { - sk_enter_memory_pressure(sk); - sk_stream_moderate_sndbuf(sk); - } - return page; + return &sk->sk_frag; }
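A caller of the new helper picks the per-task or per-socket frag, refills it when exhausted, and appends into it. A hedged usage sketch (error handling trimmed, the copy step left as a comment; sk_page_frag_refill() is declared just below):

static int copy_into_frag(struct sock *sk, int len)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;	/* refill already signalled memory pressure */

	/* copy min(len, pfrag->size - pfrag->offset) bytes to
	 * page_address(pfrag->page) + pfrag->offset, then advance
	 * pfrag->offset by the amount actually consumed.
	 */
	return 0;
}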
+ extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); + /* * Default write policy as shown to user space via poll/select/SIGIO */ diff --combined init/Kconfig index 8bfda46,b445d6f..186a482 --- a/init/Kconfig +++ b/init/Kconfig @@@ -942,28 -942,12 +942,12 @@@ config UIDGID_CONVERTE depends on PROC_EVENTS = n
# Networking - depends on NET = n depends on NET_9P = n - depends on IPX = n - depends on PHONET = n - depends on NET_CLS_FLOW = n - depends on NETFILTER_XT_MATCH_OWNER = n - depends on NETFILTER_XT_MATCH_RECENT = n - depends on NETFILTER_XT_TARGET_LOG = n - depends on NETFILTER_NETLINK_LOG = n - depends on INET = n - depends on IPV6 = n - depends on IP_SCTP = n depends on AF_RXRPC = n - depends on LLC2 = n depends on NET_KEY = n - depends on INET_DIAG = n depends on DNS_RESOLVER = n - depends on AX25 = n - depends on ATALK = n
# Filesystems - depends on USB_DEVICEFS = n depends on USB_GADGETFS = n depends on USB_FUNCTIONFS = n depends on DEVTMPFS = n @@@ -1019,9 -1003,6 +1003,6 @@@ depends on !UML || HOSTFS = n
# The rare drivers that won't build - depends on AIRO = n - depends on AIRO_CS = n - depends on TUN = n depends on INFINIBAND_QIB = n depends on BLK_DEV_LOOP = n depends on ANDROID_BINDER_IPC = n @@@ -1153,8 -1134,7 +1134,8 @@@ menuconfig EXPER
config UID16 bool "Enable 16-bit UID system calls" if EXPERT - depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) + depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) \ + || AARCH32_EMULATION default y help This enables the legacy 16-bit UID syscall wrappers. diff --combined kernel/exit.c index 0951133,42f2595..346616c --- a/kernel/exit.c +++ b/kernel/exit.c @@@ -457,13 -457,108 +457,13 @@@ void daemonize(const char *name, ... /* Become as one with the init task */
daemonize_fs_struct(); - exit_files(current); - current->files = init_task.files; - atomic_inc(¤t->files->count); + daemonize_descriptors();
reparent_to_kthreadd(); }
EXPORT_SYMBOL(daemonize);
-static void close_files(struct files_struct * files) -{ - int i, j; - struct fdtable *fdt; - - j = 0; - - /* - * It is safe to dereference the fd table without RCU or - * ->file_lock because this is the last reference to the - * files structure. But use RCU to shut RCU-lockdep up. - */ - rcu_read_lock(); - fdt = files_fdtable(files); - rcu_read_unlock(); - for (;;) { - unsigned long set; - i = j * BITS_PER_LONG; - if (i >= fdt->max_fds) - break; - set = fdt->open_fds[j++]; - while (set) { - if (set & 1) { - struct file * file = xchg(&fdt->fd[i], NULL); - if (file) { - filp_close(file, files); - cond_resched(); - } - } - i++; - set >>= 1; - } - } -} - -struct files_struct *get_files_struct(struct task_struct *task) -{ - struct files_struct *files; - - task_lock(task); - files = task->files; - if (files) - atomic_inc(&files->count); - task_unlock(task); - - return files; -} - -void put_files_struct(struct files_struct *files) -{ - struct fdtable *fdt; - - if (atomic_dec_and_test(&files->count)) { - close_files(files); - /* - * Free the fd and fdset arrays if we expanded them. - * If the fdtable was embedded, pass files for freeing - * at the end of the RCU grace period. Otherwise, - * you can free files immediately. - */ - rcu_read_lock(); - fdt = files_fdtable(files); - if (fdt != &files->fdtab) - kmem_cache_free(files_cachep, files); - free_fdtable(fdt); - rcu_read_unlock(); - } -} - -void reset_files_struct(struct files_struct *files) -{ - struct task_struct *tsk = current; - struct files_struct *old; - - old = tsk->files; - task_lock(tsk); - tsk->files = files; - task_unlock(tsk); - put_files_struct(old); -} - -void exit_files(struct task_struct *tsk) -{ - struct files_struct * files = tsk->files; - - if (files) { - task_lock(tsk); - tsk->files = NULL; - task_unlock(tsk); - put_files_struct(files); - } -} - #ifdef CONFIG_MM_OWNER /* * A task is exiting. If it owned this mm, find a new owner for the mm. @@@ -951,6 -1046,9 +951,9 @@@ void do_exit(long code if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe);
+ if (tsk->task_frag.page) + put_page(tsk->task_frag.page); + validate_creds_for_do_exit(tsk);
preempt_disable(); diff --combined kernel/pid_namespace.c index 6144bab,baa528d..478bad2 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@@ -16,6 -16,7 +16,7 @@@ #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/reboot.h> + #include <linux/export.h>
#define BITS_PER_PAGE (PAGE_SIZE*8)
@@@ -144,6 -145,7 +145,7 @@@ void free_pid_ns(struct kref *kref if (parent != NULL) put_pid_ns(parent); } + EXPORT_SYMBOL_GPL(free_pid_ns);
void zap_pid_ns_processes(struct pid_namespace *pid_ns) { @@@ -232,19 -234,15 +234,19 @@@ static int pid_ns_ctl_handler(struct ct */
tmp.data = ¤t->nsproxy->pid_ns->last_pid; - return proc_dointvec(&tmp, write, buffer, lenp, ppos); + return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); }
+extern int pid_max; +static int zero = 0; static struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), .mode = 0666, /* permissions are checked in the handler */ .proc_handler = pid_ns_ctl_handler, + .extra1 = &zero, + .extra2 = &pid_max, }, { } }; diff --combined kernel/taskstats.c index 5116b7e,123793c..9708190 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@@ -415,15 -415,16 +415,15 @@@ static int cgroupstats_user_cmd(struct struct nlattr *na; size_t size; u32 fd; - struct file *file; - int fput_needed; + struct fd f;
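Switching the handler to proc_dointvec_minmax() makes .extra1/.extra2 act as inclusive bounds, so writes to ns_last_pid outside [0, pid_max] are rejected with -EINVAL. A self-contained sketch of the same table shape, with hypothetical names:

static int example_value;
static int range_min;			/* 0 */
static int range_max = 4096;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &range_min,	/* reject writes below */
		.extra2		= &range_max,	/* reject writes above */
	},
	{ }
};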
na = info->attrs[CGROUPSTATS_CMD_ATTR_FD]; if (!na) return -EINVAL;
fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]); - file = fget_light(fd, &fput_needed); - if (!file) + f = fdget(fd); + if (!f.file) return 0;
size = nla_total_size(sizeof(struct cgroupstats)); @@@ -443,7 -444,7 +443,7 @@@ stats = nla_data(na); memset(stats, 0, sizeof(*stats));
- rc = cgroupstats_build(stats, file->f_dentry); + rc = cgroupstats_build(stats, f.file->f_dentry); if (rc < 0) { nlmsg_free(rep_skb); goto err; @@@ -452,7 -453,7 +452,7 @@@ rc = send_reply(rep_skb, info);
err: - fput_light(file, fput_needed); + fdput(f); return rc; }
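The fget_light()/fput_light() pair with its fput_needed flag collapses into struct fd: fdget() records internally whether a reference was taken, and fdput() undoes exactly that. A minimal sketch of the idiom (the function name is hypothetical):

static int with_fd(unsigned int ufd)
{
	struct fd f = fdget(ufd);
	int err = 0;

	if (!f.file)
		return -EBADF;

	/* ... use f.file ... */

	fdput(f);	/* drops the reference only if fdget() took one */
	return err;
}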
@@@ -466,7 -467,7 +466,7 @@@ static int cmd_attr_register_cpumask(st rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); if (rc < 0) goto out; - rc = add_del_listener(info->snd_pid, mask, REGISTER); + rc = add_del_listener(info->snd_portid, mask, REGISTER); out: free_cpumask_var(mask); return rc; @@@ -482,7 -483,7 +482,7 @@@ static int cmd_attr_deregister_cpumask( rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); if (rc < 0) goto out; - rc = add_del_listener(info->snd_pid, mask, DEREGISTER); + rc = add_del_listener(info->snd_portid, mask, DEREGISTER); out: free_cpumask_var(mask); return rc; diff --combined net/9p/trans_fd.c index 4c3be8f,6449bae..7bea6da --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@@ -316,7 -316,8 +316,7 @@@ static void p9_read_work(struct work_st m->rsize - m->rpos); p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err); if (err == -EAGAIN) { - clear_bit(Rworksched, &m->wsched); - return; + goto end_clear; }
if (err <= 0) @@@ -378,20 -379,19 +378,20 @@@ m->req = NULL; }
+end_clear: + clear_bit(Rworksched, &m->wsched); + if (!list_empty(&m->req_list)) { if (test_and_clear_bit(Rpending, &m->wsched)) n = POLLIN; else n = p9_fd_poll(m->client, NULL);
- if (n & POLLIN) { + if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); schedule_work(&m->rq); - } else - clear_bit(Rworksched, &m->wsched); - } else - clear_bit(Rworksched, &m->wsched); + } + }
return; error: @@@ -453,13 -453,12 +453,13 @@@ static void p9_write_work(struct work_s }
if (!m->wsize) { + spin_lock(&m->client->lock); if (list_empty(&m->unsent_req_list)) { clear_bit(Wworksched, &m->wsched); + spin_unlock(&m->client->lock); return; }
- spin_lock(&m->client->lock); req = list_entry(m->unsent_req_list.next, struct p9_req_t, req_list); req->status = REQ_STATUS_SENT; @@@ -477,9 -476,10 +477,9 @@@ clear_bit(Wpending, &m->wsched); err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos); p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err); - if (err == -EAGAIN) { - clear_bit(Wworksched, &m->wsched); - return; - } + if (err == -EAGAIN) + goto end_clear; +
if (err < 0) goto error; @@@ -492,21 -492,19 +492,21 @@@ if (m->wpos == m->wsize) m->wpos = m->wsize = 0;
- if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) { +end_clear: + clear_bit(Wworksched, &m->wsched); + + if (m->wsize || !list_empty(&m->unsent_req_list)) { if (test_and_clear_bit(Wpending, &m->wsched)) n = POLLOUT; else n = p9_fd_poll(m->client, NULL);
- if (n & POLLOUT) { + if ((n & POLLOUT) && + !test_and_set_bit(Wworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); schedule_work(&m->wq); - } else - clear_bit(Wworksched, &m->wsched); - } else - clear_bit(Wworksched, &m->wsched); + } + }
return;
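Both the read and write paths now re-arm themselves only when they win test_and_set_bit() on the Rworksched/Wworksched flag, so a work item can no longer be queued twice for one poll wakeup. The idiom, reduced to a sketch with a hypothetical helper:

static void p9_maybe_resched(struct p9_conn *m, int sched_bit,
			     struct work_struct *work)
{
	/* only the caller that atomically sets the bit may queue the work */
	if (!test_and_set_bit(sched_bit, &m->wsched))
		schedule_work(work);
}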
@@@ -795,28 -793,30 +795,28 @@@ static int p9_fd_open(struct p9_client static int p9_socket_open(struct p9_client *client, struct socket *csocket) { struct p9_trans_fd *p; - int ret, fd; + struct file *file; + int ret;
p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); if (!p) return -ENOMEM;
csocket->sk->sk_allocation = GFP_NOIO; - file = sock_alloc_file(csocket, 0); - fd = sock_map_fd(csocket, 0); - if (fd < 0) { ++ file = sock_alloc_file(csocket, 0, NULL); + if (IS_ERR(file)) { pr_err("%s (%d): failed to map fd\n", __func__, task_pid_nr(current)); sock_release(csocket); kfree(p); - return fd; + return PTR_ERR(file); }
- get_file(csocket->file); - get_file(csocket->file); - p->wr = p->rd = csocket->file; + get_file(file); + p->wr = p->rd = file; client->trans = p; client->status = Connected;
- sys_close(fd); /* still racy */ - p->rd->f_flags |= O_NONBLOCK;
p->conn = p9_conn_create(client); diff --combined net/batman-adv/bat_iv_ogm.c index 469daab,df79300..b02b75d --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -166,13 -166,15 +166,15 @@@ static void batadv_iv_ogm_send_to_if(st int16_t buff_pos; struct batadv_ogm_packet *batadv_ogm_packet; struct sk_buff *skb; + uint8_t *packet_pos;
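A reference-count sketch of why the two get_file(csocket->file) calls and the racy sys_close(fd) all disappear: sock_alloc_file() hands back a file that already holds one reference and never installs an fd, and p->rd/p->wr alias the same file, so a single extra get_file() covers the second alias.

	/* file = sock_alloc_file(csocket, 0, NULL);	refcount = 1	*/
	/* get_file(file);				refcount = 2	*/
	/* p->wr = p->rd = file;	two aliases, two references	*/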
if (hard_iface->if_status != BATADV_IF_ACTIVE) return;
packet_num = 0; buff_pos = 0; - batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; + packet_pos = forw_packet->skb->data; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
/* adjust all flags and log packets */ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, @@@ -181,15 -183,17 +183,17 @@@ /* we might have aggregated direct link packets with an * ordinary base packet */ - if ((forw_packet->direct_link_flags & (1 << packet_num)) && - (forw_packet->if_incoming == hard_iface)) + if (forw_packet->direct_link_flags & BIT(packet_num) && + forw_packet->if_incoming == hard_iface) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
- fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? - "Sending own" : - "Forwarding")); + if (packet_num > 0 || !forw_packet->own) + fwd_str = "Forwarding"; + else + fwd_str = "Sending own"; + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? "aggregated " : ""), @@@ -204,8 -208,8 +208,8 @@@ buff_pos += BATADV_OGM_HLEN; buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes); packet_num++; - batadv_ogm_packet = (struct batadv_ogm_packet *) - (forw_packet->skb->data + buff_pos); + packet_pos = forw_packet->skb->data + buff_pos; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; }
/* create clone because function is called more than once */ @@@ -227,9 -231,10 +231,10 @@@ static void batadv_iv_ogm_emit(struct b struct batadv_hard_iface *primary_if = NULL; struct batadv_ogm_packet *batadv_ogm_packet; unsigned char directlink; + uint8_t *packet_pos;
- batadv_ogm_packet = (struct batadv_ogm_packet *) - (forw_packet->skb->data); + packet_pos = forw_packet->skb->data; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) { @@@ -454,6 -459,7 +459,7 @@@ static void batadv_iv_ogm_aggregate(str int packet_len, bool direct_link) { unsigned char *skb_buff; + unsigned long new_direct_link_flag;
skb_buff = skb_put(forw_packet_aggr->skb, packet_len); memcpy(skb_buff, packet_buff, packet_len); @@@ -461,9 -467,10 +467,10 @@@ forw_packet_aggr->num_packets++;
/* save packet direct link flag status */ - if (direct_link) - forw_packet_aggr->direct_link_flags |= - (1 << forw_packet_aggr->num_packets); + if (direct_link) { + new_direct_link_flag = BIT(forw_packet_aggr->num_packets); + forw_packet_aggr->direct_link_flags |= new_direct_link_flag; + } }
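BIT(n) from <linux/bitops.h> expands to (1UL << n). That matters here because direct_link_flags is an unsigned long while (1 << n) is a signed int, so the old form would hit undefined behaviour once num_packets reaches 31. Illustration:

static inline void set_direct_link(unsigned long *flags, unsigned int n)
{
	*flags |= BIT(n);	/* well defined for any n < BITS_PER_LONG */
	/* *flags |= 1 << n;	   UB for n >= 31: signed-int overflow */
}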
static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, @@@ -586,6 -593,8 +593,8 @@@ static void batadv_iv_ogm_schedule(stru struct batadv_ogm_packet *batadv_ogm_packet; struct batadv_hard_iface *primary_if; int vis_server, tt_num_changes = 0; + uint32_t seqno; + uint8_t bandwidth;
vis_server = atomic_read(&bat_priv->vis_mode); primary_if = batadv_primary_if_get_selected(bat_priv); @@@ -599,12 -608,12 +608,12 @@@ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
/* change sequence number to network order */ - batadv_ogm_packet->seqno = - htonl((uint32_t)atomic_read(&hard_iface->seqno)); + seqno = (uint32_t)atomic_read(&hard_iface->seqno); + batadv_ogm_packet->seqno = htonl(seqno); atomic_inc(&hard_iface->seqno);
- batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); - batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc); + batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn); + batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc); if (tt_num_changes >= 0) batadv_ogm_packet->tt_num_changes = tt_num_changes;
@@@ -613,12 -622,13 +622,13 @@@ else batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
- if ((hard_iface == primary_if) && - (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER)) - batadv_ogm_packet->gw_flags = - (uint8_t)atomic_read(&bat_priv->gw_bandwidth); - else + if (hard_iface == primary_if && + atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) { + bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth); + batadv_ogm_packet->gw_flags = bandwidth; + } else { batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS; + }
batadv_slide_own_bcast_window(hard_iface); batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, @@@ -642,9 -652,9 +652,10 @@@ batadv_iv_ogm_orig_update(struct batadv struct batadv_neigh_node *router = NULL; struct batadv_orig_node *orig_node_tmp; struct hlist_node *node; + int if_num; uint8_t sum_orig, sum_neigh; uint8_t *neigh_addr; + uint8_t tq_avg;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "update_originator(): Searching and updating originator entry of received packet\n"); @@@ -668,8 -678,8 +679,8 @@@ spin_lock_bh(&tmp_neigh_node->lq_update_lock); batadv_ring_buffer_set(tmp_neigh_node->tq_recv, &tmp_neigh_node->tq_index, 0); - tmp_neigh_node->tq_avg = - batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); + tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); + tmp_neigh_node->tq_avg = tq_avg; spin_unlock_bh(&tmp_neigh_node->lq_update_lock); }
@@@ -728,14 -738,12 +739,14 @@@ if (router && (neigh_node->tq_avg == router->tq_avg)) { orig_node_tmp = router->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); - sum_orig = orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + if_num = router->if_incoming->if_num; + sum_orig = orig_node_tmp->bcast_own_sum[if_num]; spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
orig_node_tmp = neigh_node->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); - sum_neigh = orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + if_num = neigh_node->if_incoming->if_num; + sum_neigh = orig_node_tmp->bcast_own_sum[if_num]; spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
if (sum_orig >= sum_neigh) @@@ -836,8 -844,10 +847,10 @@@ static int batadv_iv_ogm_calc_tq(struc spin_unlock_bh(&orig_node->ogm_cnt_lock);
/* pay attention to not get a value bigger than 100 % */ - total_count = (orig_eq_count > neigh_rq_count ? - neigh_rq_count : orig_eq_count); + if (orig_eq_count > neigh_rq_count) + total_count = neigh_rq_count; + else + total_count = orig_eq_count;
/* if we have too few packets (too less data) we set tq_own to zero * if we receive too few packets it is not considered bidirectional @@@ -911,6 -921,7 +924,7 @@@ batadv_iv_ogm_update_seqnos(const struc int set_mark, ret = -1; uint32_t seqno = ntohl(batadv_ogm_packet->seqno); uint8_t *neigh_addr; + uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); if (!orig_node) @@@ -945,9 -956,9 +959,9 @@@ tmp_neigh_node->real_bits, seq_diff, set_mark);
- tmp_neigh_node->real_packet_count = - bitmap_weight(tmp_neigh_node->real_bits, - BATADV_TQ_LOCAL_WINDOW_SIZE); + packet_count = bitmap_weight(tmp_neigh_node->real_bits, + BATADV_TQ_LOCAL_WINDOW_SIZE); + tmp_neigh_node->real_packet_count = packet_count; } rcu_read_unlock();
@@@ -1164,9 -1175,12 +1178,12 @@@ static void batadv_iv_ogm_process(cons /* if sender is a direct neighbor the sender mac equals * originator mac */ - orig_neigh_node = (is_single_hop_neigh ? - orig_node : - batadv_get_orig_node(bat_priv, ethhdr->h_source)); + if (is_single_hop_neigh) + orig_neigh_node = orig_node; + else + orig_neigh_node = batadv_get_orig_node(bat_priv, + ethhdr->h_source); + if (!orig_neigh_node) goto out;
@@@ -1252,6 -1266,7 +1269,7 @@@ static int batadv_iv_ogm_receive(struc int buff_pos = 0, packet_len; unsigned char *tt_buff, *packet_buff; bool ret; + uint8_t *packet_pos;
ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN); if (!ret) @@@ -1282,8 -1297,8 +1300,8 @@@ buff_pos += BATADV_OGM_HLEN; buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
- batadv_ogm_packet = (struct batadv_ogm_packet *) - (packet_buff + buff_pos); + packet_pos = packet_buff + buff_pos; + batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, batadv_ogm_packet->tt_num_changes));
diff --combined net/batman-adv/soft-interface.c index 21c5357,7b683e0..b9a28d2 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -93,28 -93,32 +93,35 @@@ static int batadv_interface_release(str static struct net_device_stats *batadv_interface_stats(struct net_device *dev) { struct batadv_priv *bat_priv = netdev_priv(dev); - return &bat_priv->stats; + struct net_device_stats *stats = &bat_priv->stats; + + stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX); + stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES); + stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED); + stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX); + stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES); + return stats; }
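The interface stats are now folded on demand from per-CPU counters, which lets the transmit/receive hot paths do a cheap this_cpu_add() with no shared cacheline. A hedged sketch of the summation pattern behind batadv_sum_counter(); names and layout are illustrative, not batman-adv's exact code:

static u64 sum_counter(u64 __percpu *counters, size_t idx)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(counters, cpu)[idx];

	return sum;
}

/* writers: this_cpu_add(counters[idx], 1); -- no lock, no atomic RMW */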
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) { struct batadv_priv *bat_priv = netdev_priv(dev); struct sockaddr *addr = p; + uint8_t old_addr[ETH_ALEN];
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
+ memcpy(old_addr, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + /* only modify transtable if it has been initialized before */ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { - batadv_tt_local_remove(bat_priv, dev->dev_addr, + batadv_tt_local_remove(bat_priv, old_addr, "mac address changed", false); batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); }
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); dev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } @@@ -145,6 -149,7 +152,7 @@@ static int batadv_interface_tx(struct s int data_len = skb->len, ret; short vid __maybe_unused = -1; bool do_bcast = false; + uint32_t seqno;
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) goto dropped; @@@ -226,8 -231,8 +234,8 @@@ primary_if->net_dev->dev_addr, ETH_ALEN);
/* set broadcast sequence number */ - bcast_packet->seqno = - htonl(atomic_inc_return(&bat_priv->bcast_seqno)); + seqno = atomic_inc_return(&bat_priv->bcast_seqno); + bcast_packet->seqno = htonl(seqno);
batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
@@@ -249,14 -254,14 +257,14 @@@ goto dropped_freed; }
- bat_priv->stats.tx_packets++; - bat_priv->stats.tx_bytes += data_len; + batadv_inc_counter(bat_priv, BATADV_CNT_TX); + batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len); goto end;
dropped: kfree_skb(skb); dropped_freed: - bat_priv->stats.tx_dropped++; + batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); end: if (primary_if) batadv_hardif_free_ref(primary_if); @@@ -265,7 -270,7 +273,7 @@@
void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, struct batadv_hard_iface *recv_if, - int hdr_size) + int hdr_size, struct batadv_orig_node *orig_node) { struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct ethhdr *ethhdr; @@@ -311,11 -316,16 +319,16 @@@
/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
- bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; + batadv_inc_counter(bat_priv, BATADV_CNT_RX); + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, + skb->len + ETH_HLEN);
soft_iface->last_rx = jiffies;
+ if (orig_node) + batadv_tt_add_temporary_global_entry(bat_priv, orig_node, + ethhdr->h_source); + if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped;
@@@ -382,15 -392,22 +395,22 @@@ struct net_device *batadv_softif_create if (!soft_iface) goto out;
+ bat_priv = netdev_priv(soft_iface); + + /* batadv_interface_stats() needs to be available as soon as + * register_netdevice() has been called + */ + bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); + if (!bat_priv->bat_counters) + goto free_soft_iface; + ret = register_netdevice(soft_iface); if (ret < 0) { pr_err("Unable to register the batman interface '%s': %i\n", name, ret); - goto free_soft_iface; + goto free_bat_counters; }
- bat_priv = netdev_priv(soft_iface); - atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); atomic_set(&bat_priv->bridge_loop_avoidance, 0); @@@ -408,29 -425,26 +428,26 @@@
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); atomic_set(&bat_priv->bcast_seqno, 1); - atomic_set(&bat_priv->ttvn, 0); - atomic_set(&bat_priv->tt_local_changes, 0); - atomic_set(&bat_priv->tt_ogm_append_cnt, 0); - atomic_set(&bat_priv->bla_num_requests, 0); - - bat_priv->tt_buff = NULL; - bat_priv->tt_buff_len = 0; - bat_priv->tt_poss_change = false; + atomic_set(&bat_priv->tt.vn, 0); + atomic_set(&bat_priv->tt.local_changes, 0); + atomic_set(&bat_priv->tt.ogm_append_cnt, 0); + #ifdef CONFIG_BATMAN_ADV_BLA + atomic_set(&bat_priv->bla.num_requests, 0); + #endif + bat_priv->tt.last_changeset = NULL; + bat_priv->tt.last_changeset_len = 0; + bat_priv->tt.poss_change = false;
bat_priv->primary_if = NULL; bat_priv->num_ifaces = 0;
- bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); - if (!bat_priv->bat_counters) - goto unreg_soft_iface; - ret = batadv_algo_select(bat_priv, batadv_routing_algo); if (ret < 0) - goto free_bat_counters; + goto unreg_soft_iface;
ret = batadv_sysfs_add_meshif(soft_iface); if (ret < 0) - goto free_bat_counters; + goto unreg_soft_iface;
ret = batadv_debugfs_add_meshif(soft_iface); if (ret < 0) @@@ -446,12 -460,13 +463,13 @@@ unreg_debugfs batadv_debugfs_del_meshif(soft_iface); unreg_sysfs: batadv_sysfs_del_meshif(soft_iface); - free_bat_counters: - free_percpu(bat_priv->bat_counters); unreg_soft_iface: + free_percpu(bat_priv->bat_counters); unregister_netdevice(soft_iface); return NULL;
+ free_bat_counters: + free_percpu(bat_priv->bat_counters); free_soft_iface: free_netdev(soft_iface); out: @@@ -521,6 -536,11 +539,11 @@@ static u32 batadv_get_link(struct net_d static const struct { const char name[ETH_GSTRING_LEN]; } batadv_counters_strings[] = { + { "tx" }, + { "tx_bytes" }, + { "tx_dropped" }, + { "rx" }, + { "rx_bytes" }, { "forward" }, { "forward_bytes" }, { "mgmt_tx" }, diff --combined net/bluetooth/bnep/sock.c index 1eaacf1,5b6cc0b..e7154a5 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@@ -29,6 -29,10 +29,10 @@@
#include "bnep.h"
+ static struct bt_sock_list bnep_sk_list = { + .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock) + }; + static int bnep_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@@ -38,6 -42,8 +42,8 @@@ if (!sk) return 0;
+ bt_sock_unlink(&bnep_sk_list, sk); + sock_orphan(sk); sock_put(sk); return 0; @@@ -58,7 -64,7 +64,7 @@@ static int bnep_sock_ioctl(struct socke switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@@ -84,7 -90,7 +90,7 @@@
case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; @@@ -204,6 -210,7 +210,7 @@@ static int bnep_sock_create(struct net sk->sk_protocol = protocol; sk->sk_state = BT_OPEN;
+ bt_sock_link(&bnep_sk_list, sk); return 0; }
@@@ -222,19 -229,30 +229,30 @@@ int __init bnep_sock_init(void return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("Can't register BNEP socket"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create BNEP proc file"); + bt_sock_unregister(BTPROTO_BNEP); + goto error; + } + + BT_INFO("BNEP socket layer initialized");
return 0;
error: - BT_ERR("Can't register BNEP socket"); proto_unregister(&bnep_proto); return err; }
void __exit bnep_sock_cleanup(void) { + bt_procfs_cleanup(&init_net, "bnep"); if (bt_sock_unregister(BTPROTO_BNEP) < 0) BT_ERR("Can't unregister BNEP socket");
diff --combined net/bluetooth/cmtp/sock.c index 32dc83d,d5cacef..aacb802 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c @@@ -42,6 -42,10 +42,10 @@@
#include "cmtp.h"
+ static struct bt_sock_list cmtp_sk_list = { + .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock) + }; + static int cmtp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@@ -51,6 -55,8 +55,8 @@@ if (!sk) return 0;
+ bt_sock_unlink(&cmtp_sk_list, sk); + sock_orphan(sk); sock_put(sk);
@@@ -72,7 -78,7 +78,7 @@@ static int cmtp_sock_ioctl(struct socke switch (cmd) { case CMTPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@@ -97,7 -103,7 +103,7 @@@
case CMTPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; @@@ -214,6 -220,8 +220,8 @@@ static int cmtp_sock_create(struct net sk->sk_protocol = protocol; sk->sk_state = BT_OPEN;
+ bt_sock_link(&cmtp_sk_list, sk); + return 0; }
@@@ -232,19 -240,30 +240,30 @@@ int cmtp_init_sockets(void return err;
err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("Can't register CMTP socket"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create CMTP proc file"); + bt_sock_unregister(BTPROTO_CMTP); + goto error; + } + + BT_INFO("CMTP socket layer initialized");
return 0;
error: - BT_ERR("Can't register CMTP socket"); proto_unregister(&cmtp_proto); return err; }
void cmtp_cleanup_sockets(void) { + bt_procfs_cleanup(&init_net, "cmtp"); if (bt_sock_unregister(BTPROTO_CMTP) < 0) BT_ERR("Can't unregister CMTP socket");
diff --combined net/bluetooth/hci_core.c index 0b997c8,fa974a1..8806869 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@@ -696,7 -696,8 +696,8 @@@ int hci_dev_open(__u16 dev hci_dev_hold(hdev); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); - if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { + if (!test_bit(HCI_SETUP, &hdev->dev_flags) && + mgmt_valid_hdev(hdev)) { hci_dev_lock(hdev); mgmt_powered(hdev, 1); hci_dev_unlock(hdev); @@@ -734,8 -735,6 +735,8 @@@ static int hci_dev_do_close(struct hci_
cancel_work_sync(&hdev->le_scan);
+ cancel_delayed_work(&hdev->power_off); + hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev);
@@@ -799,7 -798,8 +800,8 @@@ * and no tasks are scheduled. */ hdev->close(hdev);
- if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { + if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && + mgmt_valid_hdev(hdev)) { hci_dev_lock(hdev); mgmt_powered(hdev, 0); hci_dev_unlock(hdev); diff --combined net/bluetooth/hci_sock.c index d5ace1e,bb64331..07f0739 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@@ -490,7 -490,7 +490,7 @@@ static int hci_sock_bound_ioctl(struct switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return -EPERM; @@@ -510,12 -510,12 +510,12 @@@
case HCIBLOCKADDR: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_sock_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_sock_blacklist_del(hdev, (void __user *) arg);
default: @@@ -546,22 -546,22 +546,22 @@@ static int hci_sock_ioctl(struct socke
case HCIDEVUP: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_open(arg);
case HCIDEVDOWN: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_close(arg);
case HCIDEVRESET: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_reset(arg);
case HCIDEVRESTAT: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_reset_stat(arg);
case HCISETSCAN: @@@ -573,7 -573,7 +573,7 @@@ case HCISETACLMTU: case HCISETSCOMTU: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_cmd(cmd, argp);
case HCIINQUIRY: @@@ -1102,21 -1102,30 +1102,30 @@@ int __init hci_sock_init(void return err;
err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("HCI socket registration failed"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create HCI proc file"); + bt_sock_unregister(BTPROTO_HCI); + goto error; + }
BT_INFO("HCI socket layer initialized");
return 0;
error: - BT_ERR("HCI socket registration failed"); proto_unregister(&hci_sk_proto); return err; }
void hci_sock_cleanup(void) { + bt_procfs_cleanup(&init_net, "hci"); if (bt_sock_unregister(BTPROTO_HCI) < 0) BT_ERR("HCI socket unregistration failed");
diff --combined net/bluetooth/hidp/sock.c index b24fb3b,eca3889..82a829d --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@@ -25,6 -25,10 +25,10 @@@
#include "hidp.h"
+ static struct bt_sock_list hidp_sk_list = { + .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock) + }; + static int hidp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@@ -34,6 -38,8 +38,8 @@@ if (!sk) return 0;
+ bt_sock_unlink(&hidp_sk_list, sk); + sock_orphan(sk); sock_put(sk);
@@@ -56,7 -62,7 +62,7 @@@ static int hidp_sock_ioctl(struct socke switch (cmd) { case HIDPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@@ -91,7 -97,7 +97,7 @@@
case HIDPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; @@@ -253,6 -259,8 +259,8 @@@ static int hidp_sock_create(struct net sk->sk_protocol = protocol; sk->sk_state = BT_OPEN;
+ bt_sock_link(&hidp_sk_list, sk); + return 0; }
@@@ -271,8 -279,19 +279,19 @@@ int __init hidp_init_sockets(void return err;
err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops); - if (err < 0) + if (err < 0) { + BT_ERR("Can't register HIDP socket"); goto error; + } + + err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create HIDP proc file"); + bt_sock_unregister(BTPROTO_HIDP); + goto error; + } + + BT_INFO("HIDP socket layer initialized");
return 0;
@@@ -284,6 -303,7 +303,7 @@@ error
void __exit hidp_cleanup_sockets(void) { + bt_procfs_cleanup(&init_net, "hidp"); if (bt_sock_unregister(BTPROTO_HIDP) < 0) BT_ERR("Can't unregister HIDP socket");
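hidp_init_sockets() grows the same two-step bring-up as hci_sock_init() above: register the protocol family, then create the proc entry, and unregister again when the second step fails, so the error path leaves no half-initialized state behind. The unwind-in-reverse skeleton, reduced to standalone C (function names are placeholders):

#include <stdio.h>

static int register_proto(void)    { puts("proto registered");   return 0; }
static void unregister_proto(void) { puts("proto unregistered"); }
static int create_procfile(void)   { return -1; /* simulate failure */ }

static int init_sockets(void)
{
	int err = register_proto();

	if (err < 0)
		return err;

	err = create_procfile();
	if (err < 0) {
		unregister_proto();	/* undo step 1 before failing */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("init_sockets: %d\n", init_sockets());
	return 0;
}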
diff --combined net/bluetooth/l2cap_core.c index 38c00f1,e0abaf3..bda526e --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@@ -416,13 -416,30 +416,30 @@@ struct l2cap_chan *l2cap_chan_create(vo return chan; }
- void l2cap_chan_destroy(struct l2cap_chan *chan) + static void l2cap_chan_destroy(struct l2cap_chan *chan) { + BT_DBG("chan %p", chan); + write_lock(&chan_list_lock); list_del(&chan->global_l); write_unlock(&chan_list_lock);
- l2cap_chan_put(chan); + kfree(chan); + } + + void l2cap_chan_hold(struct l2cap_chan *c) + { + BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt)); + + atomic_inc(&c->refcnt); + } + + void l2cap_chan_put(struct l2cap_chan *c) + { + BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt)); + + if (atomic_dec_and_test(&c->refcnt)) + l2cap_chan_destroy(c); }
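Making l2cap_chan_destroy() static and routing all teardown through l2cap_chan_put() leaves the reference count as the sole owner of the channel's lifetime: callers only hold and put, and the final put frees. A userspace sketch of the same discipline with C11 atomics (illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
	atomic_int refcnt;
	/* ... channel state ... */
};

static struct chan *chan_create(void)
{
	struct chan *c = calloc(1, sizeof(*c));

	if (c)
		atomic_init(&c->refcnt, 1);	/* creator holds one ref */
	return c;
}

static void chan_hold(struct chan *c)
{
	atomic_fetch_add(&c->refcnt, 1);
}

static void chan_put(struct chan *c)
{
	/* destroy is private: only the last put may free */
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		printf("last ref dropped, freeing %p\n", (void *)c);
		free(c);
	}
}

int main(void)
{
	struct chan *c = chan_create();

	chan_hold(c);	/* second user */
	chan_put(c);	/* still alive */
	chan_put(c);	/* freed here */
	return 0;
}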
void l2cap_chan_set_defaults(struct l2cap_chan *chan) @@@ -1008,7 -1025,7 +1025,7 @@@ static void l2cap_send_disconn_req(stru if (!conn) return;
- if (chan->mode == L2CAP_MODE_ERTM) { + if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); @@@ -5331,7 -5348,7 +5348,7 @@@ int l2cap_connect_ind(struct hci_dev *h return exact ? lm1 : lm2; }
- int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) + void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn;
@@@ -5344,7 -5361,6 +5361,6 @@@ } else l2cap_conn_del(hcon, bt_to_errno(status));
- return 0; }
int l2cap_disconn_ind(struct hci_conn *hcon) @@@ -5358,12 -5374,11 +5374,11 @@@ return conn->disc_reason; }
- int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) + void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason)); - return 0; }
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) @@@ -5406,6 -5421,11 +5421,11 @@@ int l2cap_security_cfm(struct hci_conn BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, state_to_string(chan->state));
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { + l2cap_chan_unlock(chan); + continue; + } + if (chan->scid == L2CAP_CID_LE_DATA) { if (!status && encrypt) { chan->sec_level = hcon->sec_level; diff --combined net/bluetooth/mgmt.c index eba022d,a3329cb..e329631 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@@ -193,6 -193,11 +193,11 @@@ static u8 mgmt_status_table[] = MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ };
+ bool mgmt_valid_hdev(struct hci_dev *hdev) + { + return hdev->dev_type == HCI_BREDR; + } + static u8 mgmt_status(u8 hci_status) { if (hci_status < ARRAY_SIZE(mgmt_status_table)) @@@ -317,7 -322,6 +322,6 @@@ static int read_index_list(struct sock u16 data_len) { struct mgmt_rp_read_index_list *rp; - struct list_head *p; struct hci_dev *d; size_t rp_len; u16 count; @@@ -328,7 -332,10 +332,10 @@@ read_lock(&hci_dev_list_lock);
count = 0; - list_for_each(p, &hci_dev_list) { + list_for_each_entry(d, &hci_dev_list, list) { + if (!mgmt_valid_hdev(d)) + continue; + count++; }
@@@ -346,6 -353,9 +353,9 @@@ if (test_bit(HCI_SETUP, &d->dev_flags)) continue;
+ if (!mgmt_valid_hdev(d)) + continue; + rp->index[i++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } @@@ -370,10 -380,10 +380,10 @@@ static u32 get_supported_settings(struc settings |= MGMT_SETTING_DISCOVERABLE; settings |= MGMT_SETTING_PAIRABLE;
- if (hdev->features[6] & LMP_SIMPLE_PAIR) + if (lmp_ssp_capable(hdev)) settings |= MGMT_SETTING_SSP;
- if (!(hdev->features[4] & LMP_NO_BREDR)) { + if (lmp_bredr_capable(hdev)) { settings |= MGMT_SETTING_BREDR; settings |= MGMT_SETTING_LINK_SECURITY; } @@@ -381,7 -391,7 +391,7 @@@ if (enable_hs) settings |= MGMT_SETTING_HS;
- if (hdev->features[4] & LMP_LE) + if (lmp_le_capable(hdev)) settings |= MGMT_SETTING_LE;
return settings; @@@ -403,7 -413,7 +413,7 @@@ static u32 get_current_settings(struct if (test_bit(HCI_PAIRABLE, &hdev->dev_flags)) settings |= MGMT_SETTING_PAIRABLE;
- if (!(hdev->features[4] & LMP_NO_BREDR)) + if (lmp_bredr_capable(hdev)) settings |= MGMT_SETTING_BREDR;
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) @@@ -1111,7 -1121,7 +1121,7 @@@ static int set_ssp(struct sock *sk, str
hci_dev_lock(hdev);
- if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { + if (!lmp_ssp_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_NOT_SUPPORTED); goto failed; @@@ -1195,7 -1205,7 +1205,7 @@@ static int set_le(struct sock *sk, stru
hci_dev_lock(hdev);
- if (!(hdev->features[4] & LMP_LE)) { + if (!lmp_le_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_NOT_SUPPORTED); goto unlock; @@@ -2191,7 -2201,7 +2201,7 @@@ static int read_local_oob_data(struct s goto unlock; }
- if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) { + if (!lmp_ssp_capable(hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_NOT_SUPPORTED); goto unlock; @@@ -2820,6 -2830,9 +2830,9 @@@ static void cmd_status_rsp(struct pendi
int mgmt_index_added(struct hci_dev *hdev) { + if (!mgmt_valid_hdev(hdev)) + return -ENOTSUPP; + return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); }
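The pattern throughout mgmt.c in this merge is replacing raw field tests with named predicates: mgmt_valid_hdev() gates the BR/EDR-only management interface, and lmp_ssp_capable()/lmp_bredr_capable()/lmp_le_capable() stand in for hdev->features[n] & FLAG pokes. Reduced to a standalone sketch (the bit value matches the kernel's LMP_SIMPLE_PAIR define; the rest is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LMP_SIMPLE_PAIR 0x08	/* features[6], as in the kernel headers */

enum { HDEV_BREDR, HDEV_AMP };	/* illustrative device types */

struct hdev {
	int dev_type;
	uint8_t features[8];
};

static bool valid_hdev(const struct hdev *h)
{
	return h->dev_type == HDEV_BREDR;
}

static bool lmp_ssp_capable(const struct hdev *h)
{
	return h->features[6] & LMP_SIMPLE_PAIR;
}

int main(void)
{
	struct hdev h = { .dev_type = HDEV_BREDR,
			  .features = { [6] = LMP_SIMPLE_PAIR } };

	printf("valid: %d, ssp: %d\n", valid_hdev(&h), lmp_ssp_capable(&h));
	return 0;
}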
@@@ -2827,6 -2840,9 +2840,9 @@@ int mgmt_index_removed(struct hci_dev * { u8 status = MGMT_STATUS_INVALID_INDEX;
+ if (!mgmt_valid_hdev(hdev)) + return -ENOTSUPP; + mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); @@@ -2875,22 -2891,6 +2891,22 @@@ int mgmt_powered(struct hci_dev *hdev, if (scan) hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + u8 ssp = 1; + + hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); + } + + if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + struct hci_cp_write_le_host_supported cp; + + cp.le = 1; + cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); + + hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp); + } + update_class(hdev); update_name(hdev, hdev->dev_name); update_eir(hdev); diff --combined net/core/dev.c index 89e33a5,707b124..3e645f3 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -959,18 -959,30 +959,30 @@@ int dev_alloc_name(struct net_device *d } EXPORT_SYMBOL(dev_alloc_name);
- static int dev_get_valid_name(struct net_device *dev, const char *name) + static int dev_alloc_name_ns(struct net *net, + struct net_device *dev, + const char *name) { - struct net *net; + char buf[IFNAMSIZ]; + int ret;
- BUG_ON(!dev_net(dev)); - net = dev_net(dev); + ret = __dev_alloc_name(net, name, buf); + if (ret >= 0) + strlcpy(dev->name, buf, IFNAMSIZ); + return ret; + } + + static int dev_get_valid_name(struct net *net, + struct net_device *dev, + const char *name) + { + BUG_ON(!net);
if (!dev_valid_name(name)) return -EINVAL;
if (strchr(name, '%')) - return dev_alloc_name(dev, name); + return dev_alloc_name_ns(net, dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; else if (dev->name != name) @@@ -1006,7 -1018,7 +1018,7 @@@ int dev_change_name(struct net_device *
memcpy(oldname, dev->name, IFNAMSIZ);
- err = dev_get_valid_name(dev, newname); + err = dev_get_valid_name(net, dev, newname); if (err < 0) return err;
@@@ -1109,11 -1121,23 +1121,23 @@@ void netdev_state_change(struct net_dev } EXPORT_SYMBOL(netdev_state_change);
- int netdev_bonding_change(struct net_device *dev, unsigned long event) + /** + * netdev_notify_peers - notify network peers about existence of @dev + * @dev: network device + * + * Generate traffic such that interested network peers are aware of + * @dev, such as by generating a gratuitous ARP. This may be used when + * a device wants to inform the rest of the network about some sort of + * reconfiguration such as a failover event or virtual machine + * migration. + */ + void netdev_notify_peers(struct net_device *dev) { - return call_netdevice_notifiers(event, dev); + rtnl_lock(); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); + rtnl_unlock(); } - EXPORT_SYMBOL(netdev_bonding_change); + EXPORT_SYMBOL(netdev_notify_peers);
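netdev_notify_peers() takes the rtnl lock itself, so failover and migration paths can call it directly after reconfiguring. On Ethernet the resulting NETDEV_NOTIFY_PEERS traffic is typically a gratuitous ARP; for comparison, the equivalent packet built by hand from userspace (sketch; needs CAP_NET_RAW, and the interface name, MAC and IP are placeholders):

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/if_ether.h>
#include <netpacket/packet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char *ifname = "eth0";				/* placeholder */
	unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };	/* placeholder */
	unsigned char frame[ETH_HLEN + sizeof(struct ether_arp)];
	struct ether_header *eh = (struct ether_header *)frame;
	struct ether_arp *arp = (struct ether_arp *)(frame + ETH_HLEN);
	struct sockaddr_ll sll = { 0 };
	struct in_addr ip;
	int fd;

	inet_pton(AF_INET, "192.0.2.1", &ip);			/* placeholder */

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ARP));
	if (fd < 0) { perror("socket"); return 1; }

	memset(eh->ether_dhost, 0xff, ETH_ALEN);		/* broadcast */
	memcpy(eh->ether_shost, mac, ETH_ALEN);
	eh->ether_type = htons(ETH_P_ARP);

	arp->arp_hrd = htons(ARPHRD_ETHER);
	arp->arp_pro = htons(ETH_P_IP);
	arp->arp_hln = ETH_ALEN;
	arp->arp_pln = 4;
	arp->arp_op  = htons(ARPOP_REQUEST);	/* gratuitous: sender IP == target IP */
	memcpy(arp->arp_sha, mac, ETH_ALEN);
	memcpy(arp->arp_spa, &ip, 4);
	memset(arp->arp_tha, 0, ETH_ALEN);
	memcpy(arp->arp_tpa, &ip, 4);

	sll.sll_family  = AF_PACKET;
	sll.sll_ifindex = if_nametoindex(ifname);
	sll.sll_halen   = ETH_ALEN;
	memset(sll.sll_addr, 0xff, ETH_ALEN);

	if (sendto(fd, frame, sizeof(frame), 0,
		   (struct sockaddr *)&sll, sizeof(sll)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}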
/** * dev_load - load a network module @@@ -1394,7 -1418,6 +1418,6 @@@ rollback nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); - nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } }
@@@ -1436,7 -1459,6 +1459,6 @@@ int unregister_netdevice_notifier(struc nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); - nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); } } unlock: @@@ -2134,8 -2156,7 +2156,8 @@@ static bool can_checksum_protocol(netde static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { - if (!can_checksum_protocol(features, protocol)) { + if (skb->ip_summed != CHECKSUM_NONE && + !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { @@@ -2175,9 -2196,7 +2197,7 @@@ EXPORT_SYMBOL(netif_skb_features) /* * Returns true if either: * 1. skb has frag_list and the device doesn't support FRAGLIST, or - * 2. skb is fragmented and the device does not support SG, or if - * at least one of fragments is in highmem and device does not - * support DMA from it. + * 2. skb is fragmented and the device does not support SG. */ static inline int skb_needs_linearize(struct sk_buff *skb, int features) @@@ -2206,9 -2225,6 +2226,6 @@@ int dev_hard_start_xmit(struct sk_buff if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(skb);
- if (!list_empty(&ptype_all)) - dev_queue_xmit_nit(skb, dev); - features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) && @@@ -2243,6 -2259,9 +2260,9 @@@ } }
+ if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(skb, dev); + skb_len = skb->len; rc = ops->ndo_start_xmit(skb, dev); trace_net_dev_xmit(skb, rc, dev, skb_len); @@@ -2265,6 -2284,9 +2285,9 @@@ gso if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(nskb);
+ if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(nskb, dev); + skb_len = nskb->len; rc = ops->ndo_start_xmit(nskb, dev); trace_net_dev_xmit(nskb, rc, dev, skb_len); @@@ -2374,8 -2396,8 +2397,8 @@@ static inline int get_xps_queue(struct #endif }
- static struct netdev_queue *dev_pick_tx(struct net_device *dev, - struct sk_buff *skb) + struct netdev_queue *netdev_pick_tx(struct net_device *dev, + struct sk_buff *skb) { int queue_index; const struct net_device_ops *ops = dev->netdev_ops; @@@ -2549,7 -2571,7 +2572,7 @@@ int dev_queue_xmit(struct sk_buff *skb
skb_update_prio(skb);
- txq = dev_pick_tx(dev, skb); + txq = netdev_pick_tx(dev, skb); q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT @@@ -3323,7 -3345,7 +3346,7 @@@ ncls
if (pt_prev) { if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) - ret = -ENOMEM; + goto drop; else ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { @@@ -4512,8 -4534,8 +4535,8 @@@ static void dev_change_rx_flags(struct static int __dev_set_promiscuity(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; - uid_t uid; - gid_t gid; + kuid_t uid; + kgid_t gid;
ASSERT_RTNL();
@@@ -4545,7 -4567,8 +4568,8 @@@ dev->name, (dev->flags & IFF_PROMISC), (old_flags & IFF_PROMISC), audit_get_loginuid(current), - uid, gid, + from_kuid(&init_user_ns, uid), + from_kgid(&init_user_ns, gid), audit_get_sessionid(current)); }
@@@ -5238,12 -5261,12 +5262,12 @@@ int dev_ioctl(struct net *net, unsigne */ static int dev_new_index(struct net *net) { - static int ifindex; + int ifindex = net->ifindex; for (;;) { if (++ifindex <= 0) ifindex = 1; if (!__dev_get_by_index(net, ifindex)) - return ifindex; + return net->ifindex = ifindex; } }
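dev_new_index() moving from a file-static cursor to net->ifindex makes interface-index allocation per namespace and remembers where the last search ended. The loop itself is a plain wrap-around allocator: increment, skip zero and negatives on overflow, probe until a free slot turns up. Standalone, with a toy in_use() standing in for __dev_get_by_index():

#include <stdbool.h>
#include <stdio.h>

static bool in_use(int idx)		/* stand-in for __dev_get_by_index() */
{
	return idx == 1 || idx == 2;	/* pretend 1 and 2 are taken */
}

/* allocate the next free positive index, wrapping past INT_MAX to 1 */
static int new_index(int *cursor)
{
	int ifindex = *cursor;

	for (;;) {
		if (++ifindex <= 0)	/* overflow: skip 0 and negatives */
			ifindex = 1;
		if (!in_use(ifindex))
			return *cursor = ifindex;
	}
}

int main(void)
{
	int cursor = 0;

	printf("%d\n", new_index(&cursor));	/* 3: 1 and 2 are busy */
	printf("%d\n", new_index(&cursor));	/* 4 */
	return 0;
}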
@@@ -5321,10 -5344,6 +5345,6 @@@ static void rollback_registered_many(st netdev_unregister_kobject(dev); }
- /* Process any work delayed until the end of the batch */ - dev = list_first_entry(head, struct net_device, unreg_list); - call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); - synchronize_net();
list_for_each_entry(dev, head, unreg_list) @@@ -5582,7 -5601,7 +5602,7 @@@ int register_netdevice(struct net_devic
dev->iflink = -1;
- ret = dev_get_valid_name(dev, dev->name); + ret = dev_get_valid_name(net, dev, dev->name); if (ret < 0) goto out;
@@@ -5596,7 -5615,12 +5616,12 @@@ } }
- dev->ifindex = dev_new_index(net); + ret = -EBUSY; + if (!dev->ifindex) + dev->ifindex = dev_new_index(net); + else if (__dev_get_by_index(net, dev->ifindex)) + goto err_uninit; + if (dev->iflink == -1) dev->iflink = dev->ifindex;
@@@ -5639,6 -5663,8 +5664,8 @@@
set_bit(__LINK_STATE_PRESENT, &dev->state);
+ linkwatch_init_dev(dev); + dev_init_scheduler(dev); dev_hold(dev); list_netdevice(dev); @@@ -5772,9 -5798,12 +5799,12 @@@ static void netdev_wait_allrefs(struct
/* Rebroadcast unregister notification */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users - * should have already handle it the first time */
+ __rtnl_unlock(); + rcu_barrier(); + rtnl_lock(); + + call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { /* We must not have linkwatch events @@@ -5836,9 -5865,8 +5866,8 @@@ void netdev_run_todo(void
__rtnl_unlock();
- /* Wait for rcu callbacks to finish before attempting to drain - * the device list. This usually avoids a 250ms wait. - */ + + /* Wait for rcu callbacks to finish before next phase */ if (!list_empty(&list)) rcu_barrier();
@@@ -5847,6 -5875,10 +5876,10 @@@ = list_first_entry(&list, struct net_device, todo_list); list_del(&dev->todo_list);
+ rtnl_lock(); + call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); + __rtnl_unlock(); + if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { pr_err("network todo '%s' but state %d\n", dev->name, dev->reg_state); @@@ -5942,6 -5974,8 +5975,8 @@@ struct netdev_queue *dev_ingress_queue_ return queue; }
+ static const struct ethtool_ops default_ethtool_ops; + /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for @@@ -6029,6 -6063,8 +6064,8 @@@ struct net_device *alloc_netdev_mqs(in
strcpy(dev->name, name); dev->group = INIT_NETDEV_GROUP; + if (!dev->ethtool_ops) + dev->ethtool_ops = &default_ethtool_ops; return dev;
free_all: @@@ -6213,7 -6249,7 +6250,7 @@@ int dev_change_net_namespace(struct net /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(dev, pat) < 0) + if (dev_get_valid_name(net, dev, pat) < 0) goto out; }
@@@ -6241,7 -6277,8 +6278,8 @@@ the device is just moving and can keep their slaves up. */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); + rcu_barrier(); + call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev); rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/* diff --combined net/core/netprio_cgroup.c index 5ffd084,45c503e..2aa08c2 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@@ -73,7 -73,6 +73,6 @@@ static int extend_netdev_table(struct n ((sizeof(u32) * new_len)); struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL); struct netprio_map *old_priomap; - int i;
old_priomap = rtnl_dereference(dev->priomap);
@@@ -82,10 -81,10 +81,10 @@@ return -ENOMEM; }
- for (i = 0; - old_priomap && (i < old_priomap->priomap_len); - i++) - new_priomap->priomap[i] = old_priomap->priomap[i]; + if (old_priomap) + memcpy(new_priomap->priomap, old_priomap->priomap, + old_priomap->priomap_len * + sizeof(old_priomap->priomap[0]));
new_priomap->priomap_len = new_len;
@@@ -109,32 -108,6 +108,6 @@@ static int write_update_netdev_table(st return ret; }
- static int update_netdev_tables(void) - { - int ret = 0; - struct net_device *dev; - u32 max_len; - struct netprio_map *map; - - rtnl_lock(); - max_len = atomic_read(&max_prioidx) + 1; - for_each_netdev(&init_net, dev) { - map = rtnl_dereference(dev->priomap); - /* - * don't allocate priomap if we didn't - * change net_prio.ifpriomap (map == NULL), - * this will speed up skb_update_prio. - */ - if (map && map->priomap_len < max_len) { - ret = extend_netdev_table(dev, max_len); - if (ret < 0) - break; - } - } - rtnl_unlock(); - return ret; - } - static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) { struct cgroup_netprio_state *cs; @@@ -153,12 -126,6 +126,6 @@@ goto out; }
- ret = update_netdev_tables(); - if (ret < 0) { - put_prioidx(cs->prioidx); - goto out; - } - return &cs->css; out: kfree(cs); @@@ -272,24 -239,38 +239,24 @@@ out_free_devname return ret; }
+static int update_netprio(const void *v, struct file *file, unsigned n) +{ + int err; + struct socket *sock = sock_from_file(file, &err); + if (sock) + sock->sk->sk_cgrp_prioidx = (u32)(unsigned long)v; + return 0; +} + void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) { struct task_struct *p; + void *v;
cgroup_taskset_for_each(p, cgrp, tset) { - unsigned int fd; - struct fdtable *fdt; - struct files_struct *files; - task_lock(p); - files = p->files; - if (!files) { - task_unlock(p); - continue; - } - - spin_lock(&files->file_lock); - fdt = files_fdtable(files); - for (fd = 0; fd < fdt->max_fds; fd++) { - struct file *file; - struct socket *sock; - int err; - - file = fcheck_files(files, fd); - if (!file) - continue; - - sock = sock_from_file(file, &err); - if (sock) - sock_update_netprioidx(sock->sk, p); - } - spin_unlock(&files->file_lock); + v = (void *)(unsigned long)task_netprioidx(p); + iterate_fd(p->files, 0, update_netprio, v); task_unlock(p); } } diff --combined net/core/scm.c index b0098d2,9c1c63d..ab57084 --- a/net/core/scm.c +++ b/net/core/scm.c @@@ -45,12 -45,17 +45,17 @@@ static __inline__ int scm_check_creds(struct ucred *creds) { const struct cred *cred = current_cred(); + kuid_t uid = make_kuid(cred->user_ns, creds->uid); + kgid_t gid = make_kgid(cred->user_ns, creds->gid); + + if (!uid_valid(uid) || !gid_valid(gid)) + return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && - ((creds->uid == cred->uid || creds->uid == cred->euid || - creds->uid == cred->suid) || capable(CAP_SETUID)) && - ((creds->gid == cred->gid || creds->gid == cred->egid || - creds->gid == cred->sgid) || capable(CAP_SETGID))) { + ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || + uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) && + ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || + gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) { return 0; } return -EPERM; @@@ -149,39 -154,54 +154,54 @@@ int __scm_send(struct socket *sock, str goto error; break; case SCM_CREDENTIALS: + { + struct ucred creds; + kuid_t uid; + kgid_t gid; if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) goto error; - memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); - err = scm_check_creds(&p->creds); + memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred)); + err = scm_check_creds(&creds); if (err) goto error;
- if (!p->pid || pid_vnr(p->pid) != p->creds.pid) { + p->creds.pid = creds.pid; + if (!p->pid || pid_vnr(p->pid) != creds.pid) { struct pid *pid; err = -ESRCH; - pid = find_get_pid(p->creds.pid); + pid = find_get_pid(creds.pid); if (!pid) goto error; put_pid(p->pid); p->pid = pid; }
+ err = -EINVAL; + uid = make_kuid(current_user_ns(), creds.uid); + gid = make_kgid(current_user_ns(), creds.gid); + if (!uid_valid(uid) || !gid_valid(gid)) + goto error; + + p->creds.uid = uid; + p->creds.gid = gid; + if (!p->cred || - (p->cred->euid != p->creds.uid) || - (p->cred->egid != p->creds.gid)) { + !uid_eq(p->cred->euid, uid) || + !gid_eq(p->cred->egid, gid)) { struct cred *cred; err = -ENOMEM; cred = prepare_creds(); if (!cred) goto error;
- cred->uid = cred->euid = p->creds.uid; - cred->gid = cred->egid = p->creds.gid; + cred->uid = cred->euid = uid; + cred->gid = cred->egid = gid; if (p->cred) put_cred(p->cred); p->cred = cred; } break; + } default: goto error; } @@@ -281,10 -301,11 +301,10 @@@ void scm_detach_fds(struct msghdr *msg break; } /* Bump the usage count and install the file. */ sock = sock_from_file(fp[i], &err); if (sock) sock_update_netprioidx(sock->sk, current); - fd_install(new_fd, fp[i]); + fd_install(new_fd, get_file(fp[i])); }
if (i > 0) diff --combined net/core/skbuff.c index e33ebae,2ede3cf..2f50849 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -1655,38 -1655,19 +1655,19 @@@ static struct page *linear_to_page(stru unsigned int *offset, struct sk_buff *skb, struct sock *sk) { - struct page *p = sk->sk_sndmsg_page; - unsigned int off; + struct page_frag *pfrag = sk_page_frag(sk);
- if (!p) { - new_page: - p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); - if (!p) - return NULL; - - off = sk->sk_sndmsg_off = 0; - /* hold one ref to this page until it's full */ - } else { - unsigned int mlen; - - /* If we are the only user of the page, we can reset offset */ - if (page_count(p) == 1) - sk->sk_sndmsg_off = 0; - off = sk->sk_sndmsg_off; - mlen = PAGE_SIZE - off; - if (mlen < 64 && mlen < *len) { - put_page(p); - goto new_page; - } + if (!sk_page_frag_refill(sk, pfrag)) + return NULL;
- *len = min_t(unsigned int, *len, mlen); - } + *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
- memcpy(page_address(p) + off, page_address(page) + *offset, *len); - sk->sk_sndmsg_off += *len; - *offset = off; + memcpy(page_address(pfrag->page) + pfrag->offset, + page_address(page) + *offset, *len); + *offset = pfrag->offset; + pfrag->offset += *len;
- return p; + return pfrag->page; }
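linear_to_page() now draws from the per-socket page_frag that sk_page_frag_refill() manages (the refill helper itself lands in net/core/sock.c further down): reuse the current page while space remains, rewind the offset when the socket is the page's only user, otherwise release it and start fresh. A malloc-based userspace analogue of the refill logic (sketch; 'users' stands in for page_count()):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAG_SIZE 4096

struct page_frag {
	char *page;
	int users;		/* stand-in for page_count() */
	size_t size, offset;
};

static bool frag_refill(struct page_frag *f)
{
	if (f->page) {
		if (f->users == 1) {		/* sole user: just rewind */
			f->offset = 0;
			return true;
		}
		if (f->offset < f->size)	/* room left */
			return true;
		free(f->page);			/* kernel: put_page() */
	}
	f->page = malloc(FRAG_SIZE);
	if (!f->page)
		return false;
	f->users = 1;
	f->size = FRAG_SIZE;
	f->offset = 0;
	return true;
}

int main(void)
{
	struct page_frag f = { 0 };

	if (frag_refill(&f)) {
		size_t take = 1500;	/* e.g. one MTU-sized chunk */

		printf("copy %zu bytes at offset %zu\n", take, f.offset);
		f.offset += take;
	}
	free(f.page);
	return 0;
}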
static bool spd_can_coalesce(const struct splice_pipe_desc *spd, @@@ -3502,9 -3483,7 +3483,9 @@@ bool skb_try_coalesce(struct sk_buff *t if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0;
- /* if the skb is cloned this does nothing since we set nr_frags to 0 */ + /* if the skb is not cloned this does nothing + * since we set nr_frags to 0. + */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i);
diff --combined net/core/sock.c index a6000fb,727114c..16ff426 --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -691,8 -691,7 +691,8 @@@ set_rcvbuf
case SO_KEEPALIVE: #ifdef CONFIG_INET - if (sk->sk_protocol == IPPROTO_TCP) + if (sk->sk_protocol == IPPROTO_TCP && + sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); @@@ -869,8 -868,8 +869,8 @@@ void cred_to_ucred(struct pid *pid, con if (cred) { struct user_namespace *current_ns = current_user_ns();
- ucred->uid = from_kuid(current_ns, cred->euid); - ucred->gid = from_kgid(current_ns, cred->egid); + ucred->uid = from_kuid_munged(current_ns, cred->euid); + ucred->gid = from_kgid_munged(current_ns, cred->egid); } } EXPORT_SYMBOL_GPL(cred_to_ucred); @@@ -1231,7 -1230,7 +1231,7 @@@ void sock_update_classid(struct sock *s rcu_read_lock(); /* doing current task, which cannot vanish. */ classid = task_cls_classid(current); rcu_read_unlock(); - if (classid && classid != sk->sk_classid) + if (classid != sk->sk_classid) sk->sk_classid = classid; } EXPORT_SYMBOL(sock_update_classid); @@@ -1535,12 -1534,12 +1535,12 @@@ void sock_edemux(struct sk_buff *skb } EXPORT_SYMBOL(sock_edemux);
- int sock_i_uid(struct sock *sk) + kuid_t sock_i_uid(struct sock *sk) { - int uid; + kuid_t uid;
read_lock_bh(&sk->sk_callback_lock); - uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; + uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; read_unlock_bh(&sk->sk_callback_lock); return uid; } @@@ -1745,6 -1744,45 +1745,45 @@@ struct sk_buff *sock_alloc_send_skb(str } EXPORT_SYMBOL(sock_alloc_send_skb);
+ /* On 32bit arches, an skb frag is limited to 2^15 */ + #define SKB_FRAG_PAGE_ORDER get_order(32768) + + bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) + { + int order; + + if (pfrag->page) { + if (atomic_read(&pfrag->page->_count) == 1) { + pfrag->offset = 0; + return true; + } + if (pfrag->offset < pfrag->size) + return true; + put_page(pfrag->page); + } + + /* We restrict high order allocations to users that can afford to wait */ + order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0; + + do { + gfp_t gfp = sk->sk_allocation; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + pfrag->page = alloc_pages(gfp, order); + if (likely(pfrag->page)) { + pfrag->offset = 0; + pfrag->size = PAGE_SIZE << order; + return true; + } + } while (--order >= 0); + + sk_enter_memory_pressure(sk); + sk_stream_moderate_sndbuf(sk); + return false; + } + EXPORT_SYMBOL(sk_page_frag_refill); + static void __lock_sock(struct sock *sk) __releases(&sk->sk_lock.slock) __acquires(&sk->sk_lock.slock) @@@ -2174,8 -2212,8 +2213,8 @@@ void sock_init_data(struct socket *sock sk->sk_error_report = sock_def_error_report; sk->sk_destruct = sock_def_destruct;
- sk->sk_sndmsg_page = NULL; - sk->sk_sndmsg_off = 0; + sk->sk_frag.page = NULL; + sk->sk_frag.offset = 0; sk->sk_peek_off = -1;
sk->sk_peer_pid = NULL; @@@ -2418,6 -2456,12 +2457,12 @@@ void sk_common_release(struct sock *sk xfrm_sk_free_policy(sk);
sk_refcnt_debug_release(sk); + + if (sk->sk_frag.page) { + put_page(sk->sk_frag.page); + sk->sk_frag.page = NULL; + } + sock_put(sk); } EXPORT_SYMBOL(sk_common_release); diff --combined net/ipv4/devinet.c index e12fad7,7b00556..2a6abc1 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@@ -94,25 -94,22 +94,22 @@@ static const struct nla_policy ifa_ipv4 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, };
- /* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE - * value. So if you change this define, make appropriate changes to - * inet_addr_hash as well. - */ - #define IN4_ADDR_HSIZE 256 + #define IN4_ADDR_HSIZE_SHIFT 8 + #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT) + static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE]; static DEFINE_SPINLOCK(inet_addr_hash_lock);
- static inline unsigned int inet_addr_hash(struct net *net, __be32 addr) + static u32 inet_addr_hash(struct net *net, __be32 addr) { - u32 val = (__force u32) addr ^ hash_ptr(net, 8); + u32 val = (__force u32) addr ^ net_hash_mix(net);
- return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) & - (IN4_ADDR_HSIZE - 1)); + return hash_32(val, IN4_ADDR_HSIZE_SHIFT); }
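inet_addr_hash() swaps the hand-rolled xor folding for hash_32(), the kernel's multiplicative hash: one multiply by a golden-ratio constant, keep the top bits, which mixes far better than xor-ing byte lanes. A standalone rendering (0x9e370001 is GOLDEN_RATIO_PRIME_32 in kernels of this vintage):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_32	0x9e370001U
#define IN4_ADDR_HSIZE_SHIFT	8

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiplicative hash: the high bits are the best mixed */
	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
	uint32_t addr = 0xc0a80001;	/* 192.168.0.1 in host order, for demo */

	printf("bucket %u of %u\n",
	       hash_32(addr, IN4_ADDR_HSIZE_SHIFT), 1U << IN4_ADDR_HSIZE_SHIFT);
	return 0;
}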
static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa) { - unsigned int hash = inet_addr_hash(net, ifa->ifa_local); + u32 hash = inet_addr_hash(net, ifa->ifa_local);
spin_lock(&inet_addr_hash_lock); hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]); @@@ -136,18 -133,18 +133,18 @@@ static void inet_hash_remove(struct in_ */ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) { - unsigned int hash = inet_addr_hash(net, addr); + u32 hash = inet_addr_hash(net, addr); struct net_device *result = NULL; struct in_ifaddr *ifa; struct hlist_node *node;
rcu_read_lock(); hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) { - struct net_device *dev = ifa->ifa_dev->dev; - - if (!net_eq(dev_net(dev), net)) - continue; if (ifa->ifa_local == addr) { + struct net_device *dev = ifa->ifa_dev->dev; + + if (!net_eq(dev_net(dev), net)) + continue; result = dev; break; } @@@ -182,10 -179,10 +179,10 @@@ static void inet_del_ifa(struct in_devi static void devinet_sysctl_register(struct in_device *idev); static void devinet_sysctl_unregister(struct in_device *idev); #else - static inline void devinet_sysctl_register(struct in_device *idev) + static void devinet_sysctl_register(struct in_device *idev) { } - static inline void devinet_sysctl_unregister(struct in_device *idev) + static void devinet_sysctl_unregister(struct in_device *idev) { } #endif @@@ -205,7 -202,7 +202,7 @@@ static void inet_rcu_free_ifa(struct rc kfree(ifa); }
- static inline void inet_free_ifa(struct in_ifaddr *ifa) + static void inet_free_ifa(struct in_ifaddr *ifa) { call_rcu(&ifa->rcu_head, inet_rcu_free_ifa); } @@@ -314,7 -311,7 +311,7 @@@ int inet_addr_onlink(struct in_device * }
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, - int destroy, struct nlmsghdr *nlh, u32 pid) + int destroy, struct nlmsghdr *nlh, u32 portid) { struct in_ifaddr *promote = NULL; struct in_ifaddr *ifa, *ifa1 = *ifap; @@@ -348,7 -345,7 +345,7 @@@ inet_hash_remove(ifa); *ifap1 = ifa->ifa_next;
- rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid); + rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa); inet_free_ifa(ifa); @@@ -385,7 -382,7 +382,7 @@@ is valid, it will try to restore deleted routes... Grr. So that, this order is correct. */ - rtmsg_ifa(RTM_DELADDR, ifa1, nlh, pid); + rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) { @@@ -398,7 -395,7 +395,7 @@@ }
promote->ifa_flags &= ~IFA_F_SECONDARY; - rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); + rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote); for (ifa = next_sec; ifa; ifa = ifa->ifa_next) { @@@ -420,7 -417,7 +417,7 @@@ static void inet_del_ifa(struct in_devi }
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, - u32 pid) + u32 portid) { struct in_device *in_dev = ifa->ifa_dev; struct in_ifaddr *ifa1, **ifap, **last_primary; @@@ -467,7 -464,7 +464,7 @@@ /* Send message first, then call notifier. Notifier will trigger FIB update, so that listeners of netlink will know about new ifaddr */ - rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid); + rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
return 0; @@@ -566,7 -563,7 +563,7 @@@ static int inet_rtm_deladdr(struct sk_b !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa))) continue;
- __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid); + __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); return 0; }
@@@ -652,14 -649,14 +649,14 @@@ static int inet_rtm_newaddr(struct sk_b if (IS_ERR(ifa)) return PTR_ERR(ifa);
- return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).pid); + return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid); }
/* * Determine a default network mask, based on the IP address. */
- static inline int inet_abc_len(__be32 addr) + static int inet_abc_len(__be32 addr) { int rc = -1; /* Something else, probably a multicast. */
@@@ -725,7 -722,7 +722,7 @@@ int devinet_ioctl(struct net *net, unsi break;
case SIOCSIFFLAGS: - ret = -EACCES; + ret = -EPERM; if (!capable(CAP_NET_ADMIN)) goto out; break; @@@ -733,7 -730,7 +730,7 @@@ case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ - ret = -EACCES; + ret = -EPERM; if (!capable(CAP_NET_ADMIN)) goto out; ret = -EINVAL; @@@ -1124,7 -1121,7 +1121,7 @@@ skip } }
- static inline bool inetdev_valid_mtu(unsigned int mtu) + static bool inetdev_valid_mtu(unsigned int mtu) { return mtu >= 68; } @@@ -1239,7 -1236,7 +1236,7 @@@ static struct notifier_block ip_netdev_ .notifier_call = inetdev_event, };
- static inline size_t inet_nlmsg_size(void) + static size_t inet_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(4) /* IFA_ADDRESS */ @@@ -1249,12 -1246,12 +1246,12 @@@ }
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, - u32 pid, u32 seq, int event, unsigned int flags) + u32 portid, u32 seq, int event, unsigned int flags) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh;
- nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE;
@@@ -1316,7 -1313,7 +1313,7 @@@ static int inet_dump_ifaddr(struct sk_b if (ip_idx < s_ip_idx) continue; if (inet_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).pid, + NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) <= 0) { rcu_read_unlock(); @@@ -1338,7 -1335,7 +1335,7 @@@ done }
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, - u32 pid) + u32 portid) { struct sk_buff *skb; u32 seq = nlh ? nlh->nlmsg_seq : 0; @@@ -1350,14 -1347,14 +1347,14 @@@ if (skb == NULL) goto errout;
- err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); + err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in inet_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); + rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); return; errout: if (err < 0) diff --combined net/ipv4/raw.c index d23c657,f242578..73d1e4d --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@@ -131,20 -131,18 +131,20 @@@ found * 0 - deliver * 1 - block */ -static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) +static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { - int type; + struct icmphdr _hdr; + const struct icmphdr *hdr;
- if (!pskb_may_pull(skb, sizeof(struct icmphdr))) + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (!hdr) return 1;
- type = icmp_hdr(skb)->type; - if (type < 32) { + if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data;
- return ((1 << type) & data) != 0; + return ((1U << hdr->type) & data) != 0; }
/* Do not block unknown ICMP types */ @@@ -994,7 -992,9 +994,9 @@@ static void raw_sock_seq_show(struct se i, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), - 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), + 0, 0L, 0, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), + 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); }
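Both rewritten raw-socket filters come down to the same bitmap probe, now performed on a header fetched safely with skb_header_pointer() instead of assuming linear skb data: ICMP types below 32 are matched against the 32-bit mask userspace sets via the ICMP_FILTER socket option, and unknown types are delivered (the ICMPv6 variant below indexes a 256-bit array as data[type >> 5] & (1U << (type & 31))). The check in isolation:

#include <stdint.h>
#include <stdio.h>

#define ICMP_ECHOREPLY 0

/* 1 = block, matching the kernel's "0 - deliver, 1 - block" convention */
static int icmp_filter(uint32_t data, uint8_t type)
{
	if (type < 32)
		return ((1U << type) & data) != 0;
	return 0;	/* do not block unknown ICMP types */
}

int main(void)
{
	uint32_t mask = 1U << ICMP_ECHOREPLY;	/* block echo replies */

	printf("echo reply blocked: %d\n", icmp_filter(mask, ICMP_ECHOREPLY));
	printf("type 42 blocked:    %d\n", icmp_filter(mask, 42));
	return 0;
}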
diff --combined net/ipv4/route.c index fd9af60,940f4f4..ff62206 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -202,6 -202,11 +202,6 @@@ EXPORT_SYMBOL(ip_tos2prio) static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
-static inline int rt_genid(struct net *net) -{ - return atomic_read(&net->ipv4.rt_genid); -} - #ifdef CONFIG_PROC_FS static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { @@@ -444,7 -449,7 +444,7 @@@ static inline bool rt_is_expired(const
void rt_cache_flush(struct net *net) { - atomic_inc(&net->ipv4.rt_genid); + rt_genid_bump(net); }
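rt_cache_flush() collapsing to rt_genid_bump() is invalidation by generation counter: each cached route records the genid it was created under, rt_is_expired() compares it with the live per-namespace counter, and a flush becomes a single atomic increment instead of a cache walk. The idiom in miniature:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int genid;	/* per-namespace in the kernel */

struct cached_route {
	int genid;		/* generation the entry was created under */
	/* ... route data ... */
};

static void cache_insert(struct cached_route *rt)
{
	rt->genid = atomic_load(&genid);
}

static bool is_expired(const struct cached_route *rt)
{
	return rt->genid != atomic_load(&genid);
}

static void cache_flush(void)	/* rt_genid_bump() */
{
	atomic_fetch_add(&genid, 1);
}

int main(void)
{
	struct cached_route rt;

	cache_insert(&rt);
	printf("expired: %d\n", is_expired(&rt));	/* 0 */
	cache_flush();
	printf("expired: %d\n", is_expired(&rt));	/* 1 */
	return 0;
}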
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, @@@ -1111,10 -1116,7 +1111,7 @@@ static unsigned int ipv4_mtu(const stru const struct rtable *rt = (const struct rtable *) dst; unsigned int mtu = rt->rt_pmtu;
- if (mtu && time_after_eq(jiffies, rt->dst.expires)) - mtu = 0; - - if (!mtu) + if (!mtu || time_after_eq(jiffies, rt->dst.expires)) mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu && rt_is_output_route(rt)) @@@ -1566,11 -1568,14 +1563,14 @@@ static int ip_route_input_slow(struct s if (ipv4_is_zeronet(daddr)) goto martian_destination;
- if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) { - if (ipv4_is_loopback(daddr)) + /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), + * and calls it at most once when daddr and/or saddr is a loopback address + */ + if (ipv4_is_loopback(daddr)) { + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_destination; - - if (ipv4_is_loopback(saddr)) + } else if (ipv4_is_loopback(saddr)) { + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) goto martian_source; }
@@@ -1595,7 -1600,7 +1595,7 @@@
if (res.type == RTN_LOCAL) { err = fib_validate_source(skb, saddr, daddr, tos, - net->loopback_dev->ifindex, + LOOPBACK_IFINDEX, dev, in_dev, &itag); if (err < 0) goto martian_source_keep_err; @@@ -1871,7 -1876,7 +1871,7 @@@ struct rtable *__ip_route_output_key(st
orig_oif = fl4->flowi4_oif;
- fl4->flowi4_iif = net->loopback_dev->ifindex; + fl4->flowi4_iif = LOOPBACK_IFINDEX; fl4->flowi4_tos = tos & IPTOS_RT_MASK; fl4->flowi4_scope = ((tos & RTO_ONLINK) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); @@@ -1960,7 -1965,7 +1960,7 @@@ if (!fl4->daddr) fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK); dev_out = net->loopback_dev; - fl4->flowi4_oif = net->loopback_dev->ifindex; + fl4->flowi4_oif = LOOPBACK_IFINDEX; res.type = RTN_LOCAL; flags |= RTCF_LOCAL; goto make_route; @@@ -2131,7 -2136,7 +2131,7 @@@ struct rtable *ip_route_output_flow(str EXPORT_SYMBOL_GPL(ip_route_output_flow);
static int rt_fill_info(struct net *net, __be32 dst, __be32 src, - struct flowi4 *fl4, struct sk_buff *skb, u32 pid, + struct flowi4 *fl4, struct sk_buff *skb, u32 portid, u32 seq, int event, int nowait, unsigned int flags) { struct rtable *rt = skb_rtable(skb); @@@ -2141,7 -2146,7 +2141,7 @@@ u32 error; u32 metrics[RTAX_MAX];
- nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); if (nlh == NULL) return -EMSGSIZE;
@@@ -2301,12 -2306,12 +2301,12 @@@ static int inet_rtm_getroute(struct sk_ rt->rt_flags |= RTCF_NOTIFY;
err = rt_fill_info(net, dst, src, &fl4, skb, - NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, + NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); if (err <= 0) goto errout_free;
- err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: return err;
@@@ -2501,7 -2506,7 +2501,7 @@@ static __net_initdata struct pernet_ope
static __net_init int rt_genid_init(struct net *net) { - atomic_set(&net->ipv4.rt_genid, 0); + atomic_set(&net->rt_genid, 0); get_random_bytes(&net->ipv4.dev_addr_genid, sizeof(net->ipv4.dev_addr_genid)); return 0; diff --combined net/ipv4/tcp.c index 5f64193,72ea475..f32c02e --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -486,8 -486,9 +486,9 @@@ unsigned int tcp_poll(struct file *file if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLIN | POLLRDNORM | POLLRDHUP;
- /* Connected? */ - if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { + /* Connected or passive Fast Open socket? */ + if (sk->sk_state != TCP_SYN_SENT && + (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) { int target = sock_rcvlowat(sk, 0, INT_MAX);
if (tp->urg_seq == tp->copied_seq && @@@ -840,10 -841,15 +841,15 @@@ static ssize_t do_tcp_sendpages(struct ssize_t copied; long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */ - if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) + /* Wait for a connection to finish. One exception is TCP Fast Open + * (passive side) where data is allowed to be sent before a connection + * is fully established. + */ + if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && + !tcp_passive_fastopen(sk)) { if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; + }
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@@ -1042,10 -1048,15 +1048,15 @@@ int tcp_sendmsg(struct kiocb *iocb, str
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- /* Wait for a connection to finish. */ - if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) + /* Wait for a connection to finish. One exception is TCP Fast Open + * (passive side) where data is allowed to be sent before a connection + * is fully established. + */ + if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && + !tcp_passive_fastopen(sk)) { if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto do_error; + }
if (unlikely(tp->repair)) { if (tp->repair_queue == TCP_RECV_QUEUE) { @@@ -1139,78 -1150,43 +1150,43 @@@ new_segment if (err) goto do_fault; } else { - bool merge = false; + bool merge = true; int i = skb_shinfo(skb)->nr_frags; - struct page *page = sk->sk_sndmsg_page; - int off; - - if (page && page_count(page) == 1) - sk->sk_sndmsg_off = 0; - - off = sk->sk_sndmsg_off; - - if (skb_can_coalesce(skb, i, page, off) && - off != PAGE_SIZE) { - /* We can extend the last page - * fragment. */ - merge = true; - } else if (i == MAX_SKB_FRAGS || !sg) { - /* Need to add new fragment and cannot - * do this because interface is non-SG, - * or because all the page slots are - * busy. */ - tcp_mark_push(tp, skb); - goto new_segment; - } else if (page) { - if (off == PAGE_SIZE) { - put_page(page); - sk->sk_sndmsg_page = page = NULL; - off = 0; + struct page_frag *pfrag = sk_page_frag(sk); + + if (!sk_page_frag_refill(sk, pfrag)) + goto wait_for_memory; + + if (!skb_can_coalesce(skb, i, pfrag->page, + pfrag->offset)) { + if (i == MAX_SKB_FRAGS || !sg) { + tcp_mark_push(tp, skb); + goto new_segment; } - } else - off = 0; + merge = false; + }
- if (copy > PAGE_SIZE - off) - copy = PAGE_SIZE - off; + copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, copy)) goto wait_for_memory;
- if (!page) { - /* Allocate new cache page. */ - if (!(page = sk_stream_alloc_page(sk))) - goto wait_for_memory; - } - - /* Time to copy data. We are close to - * the end! */ err = skb_copy_to_page_nocache(sk, from, skb, - page, off, copy); - if (err) { - /* If this page was new, give it to the - * socket so it does not get leaked. - */ - if (!sk->sk_sndmsg_page) { - sk->sk_sndmsg_page = page; - sk->sk_sndmsg_off = 0; - } + pfrag->page, + pfrag->offset, + copy); + if (err) goto do_error; - }
/* Update the skb. */ if (merge) { skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { - skb_fill_page_desc(skb, i, page, off, copy); - if (sk->sk_sndmsg_page) { - get_page(page); - } else if (off + copy < PAGE_SIZE) { - get_page(page); - sk->sk_sndmsg_page = page; - } + skb_fill_page_desc(skb, i, pfrag->page, + pfrag->offset, copy); + get_page(pfrag->page); } - - sk->sk_sndmsg_off = off + copy; + pfrag->offset += copy; }
if (!copied) @@@ -1762,14 -1738,8 +1738,14 @@@ int tcp_recvmsg(struct kiocb *iocb, str }
#ifdef CONFIG_NET_DMA - if (tp->ucopy.dma_chan) - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + if (tp->ucopy.dma_chan) { + if (tp->rcv_wnd == 0 && + !skb_queue_empty(&sk->sk_async_wait_queue)) { + tcp_service_net_dma(sk, true); + tcp_cleanup_rbuf(sk, copied); + } else + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + } #endif if (copied >= target) { /* Do not sleep, just process backlog. */ @@@ -2150,6 -2120,10 +2126,10 @@@ void tcp_close(struct sock *sk, long ti * they look as CLOSING or LAST_ACK for Linux) * Probably, I missed some more holelets. * --ANK + * XXX (TFO) - To start off we don't support SYN+ACK+FIN + * in a single packet! (May consider it later but will + * probably need API support or TCP_CORK SYN-ACK until + * data is written and socket is closed.) */ tcp_send_fin(sk); } @@@ -2221,8 -2195,16 +2201,16 @@@ adjudge_to_death } }
- if (sk->sk_state == TCP_CLOSE) + if (sk->sk_state == TCP_CLOSE) { + struct request_sock *req = tcp_sk(sk)->fastopen_rsk; + /* We could get here with a non-NULL req if the socket is + * aborted (e.g., closed with unread data) before 3WHS + * finishes. + */ + if (req != NULL) + reqsk_fastopen_remove(sk, req, false); inet_csk_destroy_sock(sk); + } /* Otherwise, socket is reprieved until protocol close. */
out: @@@ -2308,6 -2290,13 +2296,13 @@@ int tcp_disconnect(struct sock *sk, in } EXPORT_SYMBOL(tcp_disconnect);
+ void tcp_sock_destruct(struct sock *sk) + { + inet_sock_destruct(sk); + + kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); + } + static inline bool tcp_can_repair_sock(const struct sock *sk) { return capable(CAP_NET_ADMIN) && @@@ -2331,17 -2320,10 +2326,17 @@@ static int tcp_repair_options_est(struc tp->rx_opt.mss_clamp = opt.opt_val; break; case TCPOPT_WINDOW: - if (opt.opt_val > 14) - return -EFBIG; + { + u16 snd_wscale = opt.opt_val & 0xFFFF; + u16 rcv_wscale = opt.opt_val >> 16; + + if (snd_wscale > 14 || rcv_wscale > 14) + return -EFBIG;
- tp->rx_opt.snd_wscale = opt.opt_val; + tp->rx_opt.snd_wscale = snd_wscale; + tp->rx_opt.rcv_wscale = rcv_wscale; + tp->rx_opt.wscale_ok = 1; + } break; case TCPOPT_SACK_PERM: if (opt.opt_val != 0) @@@ -2701,6 -2683,14 +2696,14 @@@ static int do_tcp_setsockopt(struct soc else icsk->icsk_user_timeout = msecs_to_jiffies(val); break; + + case TCP_FASTOPEN: + if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | + TCPF_LISTEN))) + err = fastopen_init_queue(sk, val); + else + err = -EINVAL; + break; default: err = -ENOPROTOOPT; break; @@@ -3514,11 -3504,15 +3517,15 @@@ EXPORT_SYMBOL(tcp_cookie_generator)
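With the TCPOPT_WINDOW repair fix above, opt_val now carries both scale factors: the send window scale in the low 16 bits and the receive scale in the high 16, with wscale_ok set so the restored socket actually applies them. A checkpoint/restore tool feeding TCP_REPAIR_OPTIONS would pack the value like this (sketch; assumes the socket was already switched into repair mode, error handling elided):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdint.h>
#include <stdio.h>

#ifndef TCPOPT_WINDOW
#define TCPOPT_WINDOW 3		/* TCP option kind: window scale */
#endif

struct tcp_repair_opt {		/* mirrors the kernel's struct */
	uint32_t opt_code;
	uint32_t opt_val;
};

int main(void)
{
	uint16_t snd_wscale = 7, rcv_wscale = 9;	/* example values */
	struct tcp_repair_opt opt = {
		.opt_code = TCPOPT_WINDOW,
		/* low 16 bits: send scale, high 16 bits: receive scale */
		.opt_val  = snd_wscale | ((uint32_t)rcv_wscale << 16),
	};

	printf("opt_val = %#x\n", (unsigned)opt.opt_val);
	/* setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS, &opt, sizeof(opt)); */
	return 0;
}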
void tcp_done(struct sock *sk) { + struct request_sock *req = tcp_sk(sk)->fastopen_rsk; + if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
tcp_set_state(sk, TCP_CLOSE); tcp_clear_xmit_timers(sk); + if (req != NULL) + reqsk_fastopen_remove(sk, req, false);
sk->sk_shutdown = SHUTDOWN_MASK;
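The server half of TCP Fast Open threaded through tcp_close()/tcp_done() above is enabled from userspace with the new TCP_FASTOPEN socket option; the value sizes the queue of connections whose three-way handshake has not yet completed (fastopen_init_queue()). A minimal listener, assuming a kernel carrying this series (the option number is defined locally for older userspace headers):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23		/* not yet in older userspace headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int qlen = 16;		/* max pending TFO requests */
	struct sockaddr_in sa;

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(8080);
	sa.sin_addr.s_addr = htonl(INADDR_ANY);

	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("bind");
	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
		perror("setsockopt(TCP_FASTOPEN)");	/* EINVAL on older kernels */
	if (listen(fd, 128) < 0)
		perror("listen");
	printf("listening with TFO queue length %d\n", qlen);
	return 0;
}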
diff --combined net/ipv4/tcp_input.c index d377f48,e037697..432c366 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -237,7 -237,11 +237,11 @@@ static inline void TCP_ECN_check_ce(str tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { + /* Better not delay acks, sender can have a very low cwnd */ + tcp_enter_quickack_mode((struct sock *)tp); + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + } /* fall through */ default: tp->ecn_flags |= TCP_ECN_SEEN; @@@ -374,7 -378,7 +378,7 @@@ static void tcp_fixup_rcvbuf(struct soc /* 4. Try to fixup all. It is made immediately after connection enters * established state. */ - static void tcp_init_buffer_space(struct sock *sk) + void tcp_init_buffer_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int maxwin; @@@ -739,29 -743,6 +743,6 @@@ __u32 tcp_init_cwnd(const struct tcp_so return min_t(__u32, cwnd, tp->snd_cwnd_clamp); }
- /* Set slow start threshold and cwnd not falling to slow start */ - void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) - { - struct tcp_sock *tp = tcp_sk(sk); - const struct inet_connection_sock *icsk = inet_csk(sk); - - tp->prior_ssthresh = 0; - tp->bytes_acked = 0; - if (icsk->icsk_ca_state < TCP_CA_CWR) { - tp->undo_marker = 0; - if (set_ssthresh) - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - tp->snd_cwnd = min(tp->snd_cwnd, - tcp_packets_in_flight(tp) + 1U); - tp->snd_cwnd_cnt = 0; - tp->high_seq = tp->snd_nxt; - tp->snd_cwnd_stamp = tcp_time_stamp; - TCP_ECN_queue_cwr(tp); - - tcp_set_ca_state(sk, TCP_CA_CWR); - } - } - /* * Packet counting of FACK is based on in-order assumptions, therefore TCP * disables it when reordering is detected @@@ -2489,35 -2470,6 +2470,6 @@@ static inline void tcp_moderate_cwnd(st tp->snd_cwnd_stamp = tcp_time_stamp; }
- /* Lower bound on congestion window is slow start threshold - * unless congestion avoidance choice decides to overide it. - */ - static inline u32 tcp_cwnd_min(const struct sock *sk) - { - const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; - - return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh; - } - - /* Decrease cwnd each second ack. */ - static void tcp_cwnd_down(struct sock *sk, int flag) - { - struct tcp_sock *tp = tcp_sk(sk); - int decr = tp->snd_cwnd_cnt + 1; - - if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) || - (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) { - tp->snd_cwnd_cnt = decr & 1; - decr >>= 1; - - if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) - tp->snd_cwnd -= decr; - - tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); - tp->snd_cwnd_stamp = tcp_time_stamp; - } - } - /* Nothing was retransmitted or returned timestamp is less * than timestamp of the first retransmission. */ @@@ -2719,24 -2671,80 +2671,80 @@@ static bool tcp_try_undo_loss(struct so return false; }
- static inline void tcp_complete_cwr(struct sock *sk) + /* The cwnd reduction in CWR and Recovery use the PRR algorithm + * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction... + * It computes the number of packets to send (sndcnt) based on packets newly + * delivered: + * 1) If the packets in flight is larger than ssthresh, PRR spreads the + * cwnd reductions across a full RTT. + * 2) If packets in flight is lower than ssthresh (such as due to excess + * losses and/or application stalls), do not perform any further cwnd + * reductions, but instead slow start up to ssthresh. + */ + static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh) { struct tcp_sock *tp = tcp_sk(sk);
- /* Do not moderate cwnd if it's already undone in cwr or recovery. */ - if (tp->undo_marker) { - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - tp->snd_cwnd_stamp = tcp_time_stamp; - } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { - /* PRR algorithm. */ - tp->snd_cwnd = tp->snd_ssthresh; - tp->snd_cwnd_stamp = tcp_time_stamp; - } + tp->high_seq = tp->snd_nxt; + tp->bytes_acked = 0; + tp->snd_cwnd_cnt = 0; + tp->prior_cwnd = tp->snd_cwnd; + tp->prr_delivered = 0; + tp->prr_out = 0; + if (set_ssthresh) + tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); + TCP_ECN_queue_cwr(tp); + } + + static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, + int fast_rexmit) + { + struct tcp_sock *tp = tcp_sk(sk); + int sndcnt = 0; + int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); + + tp->prr_delivered += newly_acked_sacked; + if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { + u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + + tp->prior_cwnd - 1; + sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; + } else { + sndcnt = min_t(int, delta, + max_t(int, tp->prr_delivered - tp->prr_out, + newly_acked_sacked) + 1); + } + + sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); + tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; + } + + static inline void tcp_end_cwnd_reduction(struct sock *sk) + { + struct tcp_sock *tp = tcp_sk(sk); + + /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ + if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || + (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { + tp->snd_cwnd = tp->snd_ssthresh; + tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); }
+ /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ + void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) + { + struct tcp_sock *tp = tcp_sk(sk); + + tp->prior_ssthresh = 0; + tp->bytes_acked = 0; + if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { + tp->undo_marker = 0; + tcp_init_cwnd_reduction(sk, set_ssthresh); + tcp_set_ca_state(sk, TCP_CA_CWR); + } + } + static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@@ -2751,7 -2759,7 +2759,7 @@@ } }
- static void tcp_try_to_open(struct sock *sk, int flag) + static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked) { struct tcp_sock *tp = tcp_sk(sk);
@@@ -2768,7 -2776,7 +2776,7 @@@ if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) tcp_moderate_cwnd(tp); } else { - tcp_cwnd_down(sk, flag); + tcp_cwnd_reduction(sk, newly_acked_sacked, 0); } }
@@@ -2850,38 -2858,6 +2858,6 @@@ void tcp_simple_retransmit(struct sock } EXPORT_SYMBOL(tcp_simple_retransmit);
- /* This function implements the PRR algorithm, specifcally the PRR-SSRB - * (proportional rate reduction with slow start reduction bound) as described in - * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt. - * It computes the number of packets to send (sndcnt) based on packets newly - * delivered: - * 1) If the packets in flight is larger than ssthresh, PRR spreads the - * cwnd reductions across a full RTT. - * 2) If packets in flight is lower than ssthresh (such as due to excess - * losses and/or application stalls), do not perform any further cwnd - * reductions, but instead slow start up to ssthresh. - */ - static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, - int fast_rexmit, int flag) - { - struct tcp_sock *tp = tcp_sk(sk); - int sndcnt = 0; - int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); - - if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { - u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + - tp->prior_cwnd - 1; - sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; - } else { - sndcnt = min_t(int, delta, - max_t(int, tp->prr_delivered - tp->prr_out, - newly_acked_sacked) + 1); - } - - sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); - tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; - } - static void tcp_enter_recovery(struct sock *sk, bool ece_ack) { struct tcp_sock *tp = tcp_sk(sk); @@@ -2894,7 -2870,6 +2870,6 @@@
NET_INC_STATS_BH(sock_net(sk), mib_idx);
- tp->high_seq = tp->snd_nxt; tp->prior_ssthresh = 0; tp->undo_marker = tp->snd_una; tp->undo_retrans = tp->retrans_out; @@@ -2902,15 -2877,8 +2877,8 @@@ if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (!ece_ack) tp->prior_ssthresh = tcp_current_ssthresh(sk); - tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); - TCP_ECN_queue_cwr(tp); + tcp_init_cwnd_reduction(sk, true); } - - tp->bytes_acked = 0; - tp->snd_cwnd_cnt = 0; - tp->prior_cwnd = tp->snd_cwnd; - tp->prr_delivered = 0; - tp->prr_out = 0; tcp_set_ca_state(sk, TCP_CA_Recovery); }
@@@ -2970,7 -2938,7 +2938,7 @@@ static void tcp_fastretrans_alert(struc /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ if (tp->snd_una != tp->high_seq) { - tcp_complete_cwr(sk); + tcp_end_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; @@@ -2980,7 -2948,7 +2948,7 @@@ tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; - tcp_complete_cwr(sk); + tcp_end_cwnd_reduction(sk); break; } } @@@ -3021,7 -2989,7 +2989,7 @@@ tcp_try_undo_dsack(sk);
if (!tcp_time_to_recover(sk, flag)) { - tcp_try_to_open(sk, flag); + tcp_try_to_open(sk, flag, newly_acked_sacked); return; }
@@@ -3043,8 -3011,7 +3011,7 @@@
if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) tcp_update_scoreboard(sk, fast_rexmit); - tp->prr_delivered += newly_acked_sacked; - tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag); + tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit); tcp_xmit_retransmit_queue(sk); }
@@@ -3123,6 -3090,12 +3090,12 @@@ void tcp_rearm_rto(struct sock *sk { struct tcp_sock *tp = tcp_sk(sk);
+ /* If the retrans timer is currently being used by Fast Open + * for SYN-ACK retrans purpose, stay put. + */ + if (tp->fastopen_rsk) + return; + if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { @@@ -3384,7 -3357,7 +3357,7 @@@ static inline bool tcp_may_raise_cwnd(c { const struct tcp_sock *tp = tcp_sk(sk); return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && - !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); + !tcp_in_cwnd_reduction(sk); }
/* Check that window update is acceptable. @@@ -3452,9 -3425,9 +3425,9 @@@ static void tcp_conservative_spur_to_re }
/* A conservative spurious RTO response algorithm: reduce cwnd using - * rate halving and continue in congestion avoidance. + * PRR and continue in congestion avoidance. */ - static void tcp_ratehalving_spur_to_response(struct sock *sk) + static void tcp_cwr_spur_to_response(struct sock *sk) { tcp_enter_cwr(sk, 0); } @@@ -3462,7 -3435,7 +3435,7 @@@ static void tcp_undo_spur_to_response(struct sock *sk, int flag) { if (flag & FLAG_ECE) - tcp_ratehalving_spur_to_response(sk); + tcp_cwr_spur_to_response(sk); else tcp_undo_cwr(sk, true); } @@@ -3569,7 -3542,7 +3542,7 @@@ static bool tcp_process_frto(struct soc tcp_conservative_spur_to_response(tp); break; default: - tcp_ratehalving_spur_to_response(sk); + tcp_cwr_spur_to_response(sk); break; } tp->frto_counter = 0; @@@ -4034,7 -4007,7 +4007,7 @@@ static inline bool tcp_sequence(const s }
/* When we get a reset we do this. */ - static void tcp_reset(struct sock *sk) + void tcp_reset(struct sock *sk) { /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { @@@ -4661,7 -4634,7 +4634,7 @@@ queue_and_out
if (eaten > 0) kfree_skb_partial(skb, fragstolen); - else if (!sock_flag(sk, SOCK_DEAD)) + if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } @@@ -5556,7 -5529,8 +5529,7 @@@ no_ack #endif if (eaten) kfree_skb_partial(skb, fragstolen); - else - sk->sk_data_ready(sk, 0); + sk->sk_data_ready(sk, 0); return 0; } } @@@ -5740,7 -5714,7 +5713,7 @@@ static int tcp_rcv_synsent_state_proces
TCP_ECN_rcv_synack(tp, th);
- tp->snd_wl1 = TCP_SKB_CB(skb)->seq; + tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); tcp_ack(sk, skb, FLAG_SLOWPATH);
/* Ok.. it's good. Set up sequence numbers and @@@ -5753,7 -5727,6 +5726,6 @@@ * never scaled. */ tp->snd_wnd = ntohs(th->window); - tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; @@@ -5891,7 -5864,9 +5863,9 @@@ discard tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. - * There are no obstacles to make this. + * There are no obstacles to make this (except that we must + * either change tcp_recvmsg() to prevent it from returning data + * before 3WHS completes per RFC793, or employ TCP Fast Open). * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. @@@ -5931,6 -5906,7 +5905,7 @@@ int tcp_rcv_state_process(struct sock * { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); + struct request_sock *req; int queued = 0;
tp->rx_opt.saw_tstamp = 0; @@@ -5986,6 -5962,14 +5961,14 @@@ return 0; }
+ req = tp->fastopen_rsk; + if (req != NULL) { + BUG_ON(sk->sk_state != TCP_SYN_RECV && + sk->sk_state != TCP_FIN_WAIT1); + + if (tcp_check_req(sk, skb, req, NULL, true) == NULL) + goto discard; + } if (!tcp_validate_incoming(sk, skb, th, 0)) return 0;
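tp->fastopen_rsk is only non-NULL on a TCP Fast Open server socket, where the child is created already at SYN time and the request_sock stays alive until the first valid ACK retires the SYN-ACK; the check above routes such ACKs through tcp_check_req() first. For reference, the userspace knob that exercises this path is the server-side TCP_FASTOPEN socket option (a sketch; the qlen value is illustrative):

        #include <netinet/in.h>
        #include <netinet/tcp.h>
        #include <sys/socket.h>

        static int enable_server_tfo(int listen_fd)
        {
                int qlen = 16;  /* max pending TFO requests (illustrative) */

                /* Set on the listening socket; error handling omitted. */
                return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN,
                                  &qlen, sizeof(qlen));
        }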
@@@ -5996,7 -5980,25 +5979,25 @@@ switch (sk->sk_state) { case TCP_SYN_RECV: if (acceptable) { - tp->copied_seq = tp->rcv_nxt; + /* Once we leave TCP_SYN_RECV, we no longer + * need req so release it. + */ + if (req) { + tcp_synack_rtt_meas(sk, req); + tp->total_retrans = req->retrans; + + reqsk_fastopen_remove(sk, req, false); + } else { + /* Make sure socket is routed, for + * correct metrics. + */ + icsk->icsk_af_ops->rebuild_header(sk); + tcp_init_congestion_control(sk); + + tcp_mtup_init(sk); + tcp_init_buffer_space(sk); + tp->copied_seq = tp->rcv_nxt; + } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); @@@ -6018,23 -6020,27 +6019,27 @@@ if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
- /* Make sure socket is routed, for - * correct metrics. - */ - icsk->icsk_af_ops->rebuild_header(sk); - - tcp_init_metrics(sk); - - tcp_init_congestion_control(sk); + if (req) { + /* Re-arm the timer because data may + * have been sent out. This is similar + * to the regular data transmission case + * when new data has just been ack'ed. + * + * (TFO) - we could try to be more + * aggressive and retransmit any data + * sooner based on when they were sent + * out. + */ + tcp_rearm_rto(sk); + } else + tcp_init_metrics(sk)
/* Prevent spurious tcp_cwnd_restart() on * first data packet. */ tp->lsndtime = tcp_time_stamp;
- tcp_mtup_init(sk); tcp_initialize_rcv_mss(sk); - tcp_init_buffer_space(sk); tcp_fast_path_on(tp); } else { return 1; @@@ -6042,6 -6048,16 +6047,16 @@@ break;
case TCP_FIN_WAIT1: + /* If we enter the TCP_FIN_WAIT1 state and we are a + * Fast Open socket and this is the first acceptable + * ACK we have received, this would have acknowledged + * our SYNACK so stop the SYNACK timer. + */ + if (acceptable && req != NULL) { + /* We no longer need the request sock. */ + reqsk_fastopen_remove(sk, req, false); + tcp_rearm_rto(sk); + } if (tp->snd_una == tp->write_seq) { struct dst_entry *dst;
diff --combined net/ipv6/raw.c index 4a5f78b,7af88ef..d8e95c7 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@@ -107,20 -107,21 +107,20 @@@ found * 0 - deliver * 1 - block */ -static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) +static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) { - struct icmp6hdr *icmph; - struct raw6_sock *rp = raw6_sk(sk); + struct icmp6hdr *_hdr; + const struct icmp6hdr *hdr;
- if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { - __u32 *data = &rp->filter.data[0]; - int bit_nr; + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (hdr) { + const __u32 *data = &raw6_sk(sk)->filter.data[0]; + unsigned int type = hdr->icmp6_type;
- icmph = (struct icmp6hdr *) skb->data; - bit_nr = icmph->icmp6_type; - - return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; + return (data[type >> 5] & (1U << (type & 31))) != 0; } - return 0; + return 1; }
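Two behavioural points in the icmpv6_filter() rewrite above: skb_header_pointer() reads the ICMPv6 header relative to the transport offset, copying it out of paged data if necessary, and a header that cannot be read at all is now blocked (return 1) rather than delivered. The filter itself is a 256-bit per-type bitmap; the test, as a self-contained sketch:

        #include <stdbool.h>
        #include <stdint.h>

        /* Bit N set means "block ICMPv6 type N" (sketch). */
        static bool icmp6_type_blocked(const uint32_t bitmap[8], uint8_t type)
        {
                return (bitmap[type >> 5] & (1u << (type & 31))) != 0;
        }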
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) @@@ -1250,7 -1251,8 +1250,8 @@@ static void raw6_sock_seq_show(struct s sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, - sock_i_uid(sp), 0, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), + 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } diff --combined net/ipv6/route.c index 854e401,0607ee3..d1ddbc6 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -222,11 -222,11 +222,11 @@@ static const u32 ip6_template_metrics[R [RTAX_HOPLIMIT - 1] = 255, };
- static struct rt6_info ip6_null_entry_template = { + static const struct rt6_info ip6_null_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -ENETUNREACH, .input = ip6_pkt_discard, .output = ip6_pkt_discard_out, @@@ -242,11 -242,11 +242,11 @@@ static int ip6_pkt_prohibit(struct sk_buff *skb); static int ip6_pkt_prohibit_out(struct sk_buff *skb);
- static struct rt6_info ip6_prohibit_entry_template = { + static const struct rt6_info ip6_prohibit_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EACCES, .input = ip6_pkt_prohibit, .output = ip6_pkt_prohibit_out, @@@ -257,11 -257,11 +257,11 @@@ .rt6i_ref = ATOMIC_INIT(1), };
- static struct rt6_info ip6_blk_hole_entry_template = { + static const struct rt6_info ip6_blk_hole_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EINVAL, .input = dst_discard, .output = dst_discard, @@@ -281,14 -281,13 +281,14 @@@ static inline struct rt6_info *ip6_dst_ struct fib6_table *table) { struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, - 0, DST_OBSOLETE_NONE, flags); + 0, DST_OBSOLETE_FORCE_CHK, flags);
if (rt) { struct dst_entry *dst = &rt->dst;
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); + rt->rt6i_genid = rt_genid(net); } return rt; } @@@ -370,15 -369,11 +370,11 @@@ static void ip6_dst_ifdown(struct dst_e
static bool rt6_check_expired(const struct rt6_info *rt) { - struct rt6_info *ort = NULL; - if (rt->rt6i_flags & RTF_EXPIRES) { if (time_after(jiffies, rt->dst.expires)) return true; } else if (rt->dst.from) { - ort = (struct rt6_info *) rt->dst.from; - return (ort->rt6i_flags & RTF_EXPIRES) && - time_after(jiffies, ort->dst.expires); + return rt6_check_expired((struct rt6_info *) rt->dst.from); } return false; } @@@ -452,10 -447,9 +448,9 @@@ static void rt6_probe(struct rt6_info * * Router Reachability Probe MUST be rate-limited * to no more than one per minute. */ - rcu_read_lock(); neigh = rt ? rt->n : NULL; if (!neigh || (neigh->nud_state & NUD_VALID)) - goto out; + return; read_lock_bh(&neigh->lock); if (!(neigh->nud_state & NUD_VALID) && time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { @@@ -471,8 -465,6 +466,6 @@@ } else { read_unlock_bh(&neigh->lock); } - out: - rcu_read_unlock(); } #else static inline void rt6_probe(struct rt6_info *rt) @@@ -499,7 -491,6 +492,6 @@@ static inline int rt6_check_neigh(struc struct neighbour *neigh; int m;
- rcu_read_lock(); neigh = rt->n; if (rt->rt6i_flags & RTF_NONEXTHOP || !(rt->rt6i_flags & RTF_GATEWAY)) @@@ -517,7 -508,6 +509,6 @@@ read_unlock_bh(&neigh->lock); } else m = 0; - rcu_read_unlock(); return m; }
@@@ -966,7 -956,7 +957,7 @@@ struct dst_entry * ip6_route_output(str { int flags = 0;
- fl6->flowi6_iif = net->loopback_dev->ifindex; + fl6->flowi6_iif = LOOPBACK_IFINDEX;
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) flags |= RT6_LOOKUP_F_IFACE; @@@ -1032,13 -1022,6 +1023,13 @@@ static struct dst_entry *ip6_dst_check(
rt = (struct rt6_info *) dst;
+ /* All IPV6 dsts are created with ->obsolete set to the value + * DST_OBSOLETE_FORCE_CHK which forces validation calls down + * into this function always. + */ + if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) + return NULL; + if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { if (rt->rt6i_peer_genid != rt6_peer_genid()) { if (!rt6_has_peer(rt)) @@@ -1405,6 -1388,8 +1396,6 @@@ int ip6_route_add(struct fib6_config *c goto out; }
- rt->dst.obsolete = -1; - if (cfg->fc_flags & RTF_EXPIRES) rt6_set_expires(rt, jiffies + clock_t_to_jiffies(cfg->fc_expires)); @@@ -1469,8 -1454,21 +1460,21 @@@ } rt->dst.output = ip6_pkt_discard_out; rt->dst.input = ip6_pkt_discard; - rt->dst.error = -ENETUNREACH; rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; + switch (cfg->fc_type) { + case RTN_BLACKHOLE: + rt->dst.error = -EINVAL; + break; + case RTN_PROHIBIT: + rt->dst.error = -EACCES; + break; + case RTN_THROW: + rt->dst.error = -EAGAIN; + break; + default: + rt->dst.error = -ENETUNREACH; + break; + } goto install_route; }
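With the switch above, RTN_BLACKHOLE, RTN_PROHIBIT and RTN_THROW reject routes each carry a distinct dst.error; a later hunk in rt6_fill_node() maps the same errors back to rtm_type when dumping, so the round trip is lossless. Condensed into one illustrative helper (the constants are the kernel's, the helper itself is a sketch):

        static int rtn_type_to_dst_error(unsigned char rtm_type)
        {
                switch (rtm_type) {
                case RTN_BLACKHOLE:     return -EINVAL;
                case RTN_PROHIBIT:      return -EACCES;
                case RTN_THROW:         return -EAGAIN; /* retry next table */
                default:                return -ENETUNREACH;
                }
        }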
@@@ -1835,7 -1833,7 +1839,7 @@@ static struct rt6_info *rt6_get_route_i if (!table) return NULL;
- write_lock_bh(&table->tb6_lock); + read_lock_bh(&table->tb6_lock); fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0); if (!fn) goto out; @@@ -1851,7 -1849,7 +1855,7 @@@ break; } out: - write_unlock_bh(&table->tb6_lock); + read_unlock_bh(&table->tb6_lock); return rt; }
@@@ -1867,7 -1865,7 +1871,7 @@@ static struct rt6_info *rt6_add_route_i .fc_dst_len = prefixlen, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref), - .fc_nlinfo.pid = 0, + .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = net, }; @@@ -1894,7 -1892,7 +1898,7 @@@ struct rt6_info *rt6_get_dflt_router(co if (!table) return NULL;
- write_lock_bh(&table->tb6_lock); + read_lock_bh(&table->tb6_lock); for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { if (dev == rt->dst.dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && @@@ -1903,7 -1901,7 +1907,7 @@@ } if (rt) dst_hold(&rt->dst); - write_unlock_bh(&table->tb6_lock); + read_unlock_bh(&table->tb6_lock); return rt; }
@@@ -1917,7 -1915,7 +1921,7 @@@ struct rt6_info *rt6_add_dflt_router(co .fc_ifindex = dev->ifindex, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), - .fc_nlinfo.pid = 0, + .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = dev_net(dev), }; @@@ -2086,6 -2084,7 +2090,6 @@@ struct rt6_info *addrconf_dst_alloc(str rt->dst.input = ip6_input; rt->dst.output = ip6_output; rt->rt6i_idev = idev; - rt->dst.obsolete = -1;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; if (anycast) @@@ -2266,14 -2265,18 +2270,18 @@@ static int rtm_to_fib6_config(struct sk cfg->fc_src_len = rtm->rtm_src_len; cfg->fc_flags = RTF_UP; cfg->fc_protocol = rtm->rtm_protocol; + cfg->fc_type = rtm->rtm_type;
- if (rtm->rtm_type == RTN_UNREACHABLE) + if (rtm->rtm_type == RTN_UNREACHABLE || + rtm->rtm_type == RTN_BLACKHOLE || + rtm->rtm_type == RTN_PROHIBIT || + rtm->rtm_type == RTN_THROW) cfg->fc_flags |= RTF_REJECT;
if (rtm->rtm_type == RTN_LOCAL) cfg->fc_flags |= RTF_LOCAL;
- cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; + cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid; cfg->fc_nlinfo.nlh = nlh; cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
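The pid -> portid renames repeated through the rest of this diff are mechanical: a netlink address identifies a socket port, and only coincidentally equals the process id for a process's first netlink socket, so the field was renamed tree-wide. The resulting fill-function shape, sketched:

        static int fill_example(struct sk_buff *skb, u32 portid, u32 seq,
                                int type, int flags)
        {
                struct nlmsghdr *nlh;

                nlh = nlmsg_put(skb, portid, seq, type, 0, flags);
                if (!nlh)
                        return -EMSGSIZE;
                /* ... nla_put() attributes here ... */
                nlmsg_end(skb, nlh);
                return 0;
        }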
@@@ -2364,7 -2367,7 +2372,7 @@@ static inline size_t rt6_nlmsg_size(voi static int rt6_fill_node(struct net *net, struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, - int iif, int type, u32 pid, u32 seq, + int iif, int type, u32 portid, u32 seq, int prefix, int nowait, unsigned int flags) { struct rtmsg *rtm; @@@ -2380,7 -2383,7 +2388,7 @@@ } }
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags); + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE;
@@@ -2396,8 -2399,22 +2404,22 @@@ rtm->rtm_table = table; if (nla_put_u32(skb, RTA_TABLE, table)) goto nla_put_failure; - if (rt->rt6i_flags & RTF_REJECT) - rtm->rtm_type = RTN_UNREACHABLE; + if (rt->rt6i_flags & RTF_REJECT) { + switch (rt->dst.error) { + case -EINVAL: + rtm->rtm_type = RTN_BLACKHOLE; + break; + case -EACCES: + rtm->rtm_type = RTN_PROHIBIT; + break; + case -EAGAIN: + rtm->rtm_type = RTN_THROW; + break; + default: + rtm->rtm_type = RTN_UNREACHABLE; + break; + } + } else if (rt->rt6i_flags & RTF_LOCAL) rtm->rtm_type = RTN_LOCAL; else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) @@@ -2470,15 -2487,11 +2492,11 @@@ if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) goto nla_put_failure;
- rcu_read_lock(); n = rt->n; if (n) { - if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { - rcu_read_unlock(); + if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) goto nla_put_failure; - } } - rcu_read_unlock();
if (rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) @@@ -2511,7 -2524,7 +2529,7 @@@ int rt6_dump_route(struct rt6_info *rt
return rt6_fill_node(arg->net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, - NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq, + NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, prefix, 0, NLM_F_MULTI); }
@@@ -2591,14 -2604,14 +2609,14 @@@ static int inet6_rtm_getroute(struct sk skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, - RTM_NEWROUTE, NETLINK_CB(in_skb).pid, + RTM_NEWROUTE, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0, 0, 0); if (err < 0) { kfree_skb(skb); goto errout; }
- err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: return err; } @@@ -2618,14 -2631,14 +2636,14 @@@ void inet6_rt_notify(int event, struct goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, - event, info->pid, seq, 0, 0, 0); + event, info->portid, seq, 0, 0, 0); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE, + rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); return; errout: @@@ -2680,14 -2693,12 +2698,12 @@@ static int rt6_info_route(struct rt6_in #else seq_puts(m, "00000000000000000000000000000000 00 "); #endif - rcu_read_lock(); n = rt->n; if (n) { seq_printf(m, "%pi6", n->primary_key); } else { seq_puts(m, "00000000000000000000000000000000"); } - rcu_read_unlock(); seq_printf(m, " %08x %08x %08x %08x %8s\n", rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), rt->dst.__use, rt->rt6i_flags, diff --combined net/sctp/socket.c index fb5931c,d37d24f..59d16ea --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@@ -70,7 -70,6 +70,7 @@@ #include <linux/init.h> #include <linux/crypto.h> #include <linux/slab.h> +#include <linux/file.h>
#include <net/ip.h> #include <net/icmp.h> @@@ -428,6 -427,7 +428,7 @@@ SCTP_STATIC int sctp_do_bind(struct soc static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { + struct net *net = sock_net(asoc->base.sk); int retval = 0;
/* If there is an outstanding ASCONF chunk, queue it for later @@@ -440,7 -440,7 +441,7 @@@
/* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(chunk); - retval = sctp_primitive_ASCONF(asoc, chunk); + retval = sctp_primitive_ASCONF(net, asoc, chunk); if (retval) sctp_chunk_free(chunk); else @@@ -516,6 -516,7 +517,7 @@@ static int sctp_send_asconf_add_ip(stru struct sockaddr *addrs, int addrcnt) { + struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; @@@ -530,7 -531,7 +532,7 @@@ int i; int retval = 0;
- if (!sctp_addip_enable) + if (!net->sctp.addip_enable) return retval;
sp = sctp_sk(sk); @@@ -718,6 -719,7 +720,7 @@@ static int sctp_send_asconf_del_ip(stru struct sockaddr *addrs, int addrcnt) { + struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; @@@ -733,7 -735,7 +736,7 @@@ int stored = 0;
chunk = NULL; - if (!sctp_addip_enable) + if (!net->sctp.addip_enable) return retval;
sp = sctp_sk(sk); @@@ -1051,6 -1053,7 +1054,7 @@@ static int __sctp_connect(struct sock* int addrs_size, sctp_assoc_t *assoc_id) { + struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; @@@ -1201,7 -1204,7 +1205,7 @@@ goto out_free; }
- err = sctp_primitive_ASSOCIATE(asoc, NULL); + err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } @@@ -1459,6 -1462,7 +1463,7 @@@ SCTP_STATIC int sctp_getsockopt_connect */ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) { + struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; struct list_head *pos, *temp; @@@ -1500,9 -1504,9 +1505,9 @@@
chunk = sctp_make_abort_user(asoc, NULL, 0); if (chunk) - sctp_primitive_ABORT(asoc, chunk); + sctp_primitive_ABORT(net, asoc, chunk); } else - sctp_primitive_SHUTDOWN(asoc, NULL); + sctp_primitive_SHUTDOWN(net, asoc, NULL); }
/* On a TCP-style socket, block for at most linger_time if set. */ @@@ -1570,6 -1574,7 +1575,7 @@@ SCTP_STATIC int sctp_msghdr_parse(cons SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t msg_len) { + struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *new_asoc=NULL, *asoc=NULL; @@@ -1715,7 -1720,7 +1721,7 @@@ if (sinfo_flags & SCTP_EOF) { SCTP_DEBUG_PRINTK("Shutting down association: %p\n", asoc); - sctp_primitive_SHUTDOWN(asoc, NULL); + sctp_primitive_SHUTDOWN(net, asoc, NULL); err = 0; goto out_unlock; } @@@ -1728,7 -1733,7 +1734,7 @@@ }
SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); - sctp_primitive_ABORT(asoc, chunk); + sctp_primitive_ABORT(net, asoc, chunk); err = 0; goto out_unlock; } @@@ -1901,7 -1906,7 +1907,7 @@@
/* Auto-connect, if we aren't connected already. */ if (sctp_state(asoc, CLOSED)) { - err = sctp_primitive_ASSOCIATE(asoc, NULL); + err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) goto out_free; SCTP_DEBUG_PRINTK("We associated primitively.\n"); @@@ -1929,7 -1934,7 +1935,7 @@@ * works that way today. Keep it that way or this * breaks. */ - err = sctp_primitive_SEND(asoc, datamsg); + err = sctp_primitive_SEND(net, asoc, datamsg); /* Did the lower layer accept the chunk? */ if (err) sctp_datamsg_free(datamsg); @@@ -2321,7 -2326,9 +2327,9 @@@ static int sctp_apply_peer_addr_params( int error;
if (params->spp_flags & SPP_HB_DEMAND && trans) { - error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans); + struct net *net = sock_net(trans->asoc->base.sk); + + error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); if (error) return error; } @@@ -3034,6 -3041,7 +3042,7 @@@ static int sctp_setsockopt_maxseg(struc static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { + struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_association *asoc = NULL; struct sctp_setpeerprim prim; @@@ -3043,7 -3051,7 +3052,7 @@@
sp = sctp_sk(sk);
- if (!sctp_addip_enable) + if (!net->sctp.addip_enable) return -EPERM;
if (optlen != sizeof(struct sctp_setpeerprim)) @@@ -3280,9 -3288,10 +3289,10 @@@ static int sctp_setsockopt_auth_chunk(s char __user *optval, unsigned int optlen) { + struct net *net = sock_net(sk); struct sctp_authchunk val;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (optlen != sizeof(struct sctp_authchunk)) @@@ -3312,11 -3321,12 +3322,12 @@@ static int sctp_setsockopt_hmac_ident(s char __user *optval, unsigned int optlen) { + struct net *net = sock_net(sk); struct sctp_hmacalgo *hmacs; u32 idents; int err;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (optlen < sizeof(struct sctp_hmacalgo)) @@@ -3349,11 -3359,12 +3360,12 @@@ static int sctp_setsockopt_auth_key(str char __user *optval, unsigned int optlen) { + struct net *net = sock_net(sk); struct sctp_authkey *authkey; struct sctp_association *asoc; int ret;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (optlen <= sizeof(struct sctp_authkey)) @@@ -3390,10 -3401,11 +3402,11 @@@ static int sctp_setsockopt_active_key(s char __user *optval, unsigned int optlen) { + struct net *net = sock_net(sk); struct sctp_authkeyid val; struct sctp_association *asoc;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid)) @@@ -3418,10 -3430,11 +3431,11 @@@ static int sctp_setsockopt_del_key(stru char __user *optval, unsigned int optlen) { + struct net *net = sock_net(sk); struct sctp_authkeyid val; struct sctp_association *asoc;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (optlen != sizeof(struct sctp_authkeyid)) @@@ -3472,7 -3485,7 +3486,7 @@@ static int sctp_setsockopt_auto_asconf( sp->do_auto_asconf = 0; } else if (val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, - &sctp_auto_asconf_splist); + &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } return 0; @@@ -3844,6 -3857,7 +3858,7 @@@ out */ SCTP_STATIC int sctp_init_sock(struct sock *sk) { + struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_sock *sp;
@@@ -3873,7 -3887,7 +3888,7 @@@ sp->default_timetolive = 0;
sp->default_rcv_context = 0; - sp->max_burst = sctp_max_burst; + sp->max_burst = net->sctp.max_burst;
/* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or @@@ -3881,24 -3895,24 +3896,24 @@@ */ sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; sp->initmsg.sinit_max_instreams = sctp_max_instreams; - sp->initmsg.sinit_max_attempts = sctp_max_retrans_init; - sp->initmsg.sinit_max_init_timeo = sctp_rto_max; + sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; + sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
/* Initialize default RTO related parameters. These parameters can * be modified for with the SCTP_RTOINFO socket option. */ - sp->rtoinfo.srto_initial = sctp_rto_initial; - sp->rtoinfo.srto_max = sctp_rto_max; - sp->rtoinfo.srto_min = sctp_rto_min; + sp->rtoinfo.srto_initial = net->sctp.rto_initial; + sp->rtoinfo.srto_max = net->sctp.rto_max; + sp->rtoinfo.srto_min = net->sctp.rto_min;
/* Initialize default association related parameters. These parameters * can be modified with the SCTP_ASSOCINFO socket option. */ - sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association; + sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; sp->assocparams.sasoc_number_peer_destinations = 0; sp->assocparams.sasoc_peer_rwnd = 0; sp->assocparams.sasoc_local_rwnd = 0; - sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life; + sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
/* Initialize default event subscriptions. By default, all the * options are off. @@@ -3908,10 -3922,10 +3923,10 @@@ /* Default Peer Address Parameters. These defaults can * be modified via SCTP_PEER_ADDR_PARAMS */ - sp->hbinterval = sctp_hb_interval; - sp->pathmaxrxt = sctp_max_retrans_path; + sp->hbinterval = net->sctp.hb_interval; + sp->pathmaxrxt = net->sctp.max_retrans_path; sp->pathmtu = 0; // allow default discovery - sp->sackdelay = sctp_sack_timeout; + sp->sackdelay = net->sctp.sack_timeout; sp->sackfreq = 2; sp->param_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE | @@@ -3962,10 -3976,10 +3977,10 @@@
local_bh_disable(); percpu_counter_inc(&sctp_sockets_allocated); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); - if (sctp_default_auto_asconf) { + sock_prot_inuse_add(net, sk->sk_prot, 1); + if (net->sctp.default_auto_asconf) { list_add_tail(&sp->auto_asconf_list, - &sctp_auto_asconf_splist); + &net->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } else sp->do_auto_asconf = 0; @@@ -4012,6 -4026,7 +4027,7 @@@ SCTP_STATIC void sctp_destroy_sock(stru */ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how) { + struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc;
@@@ -4023,7 -4038,7 +4039,7 @@@ if (!list_empty(&ep->asocs)) { asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); - sctp_primitive_SHUTDOWN(asoc, NULL); + sctp_primitive_SHUTDOWN(net, asoc, NULL); } } } @@@ -4277,7 -4292,6 +4293,7 @@@ static int sctp_getsockopt_peeloff(stru { sctp_peeloff_arg_t peeloff; struct socket *newsock; + struct file *newfile; int retval = 0;
if (len < sizeof(sctp_peeloff_arg_t)) @@@ -4291,35 -4305,22 +4307,35 @@@ goto out;
/* Map the socket to an unused fd that can be returned to the user. */ - retval = sock_map_fd(newsock, 0); + retval = get_unused_fd(); if (retval < 0) { sock_release(newsock); goto out; }
- newfile = sock_alloc_file(newsock, 0); ++ newfile = sock_alloc_file(newsock, 0, NULL); + if (unlikely(IS_ERR(newfile))) { + put_unused_fd(retval); + sock_release(newsock); + return PTR_ERR(newfile); + } + SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n", __func__, sk, newsock->sk, retval);
/* Return the fd mapped to the new socket. */ + if (put_user(len, optlen)) { + fput(newfile); + put_unused_fd(retval); + return -EFAULT; + } peeloff.sd = retval; - if (put_user(len, optlen)) + if (copy_to_user(optval, &peeloff, len)) { + fput(newfile); + put_unused_fd(retval); return -EFAULT; - if (copy_to_user(optval, &peeloff, len)) - retval = -EFAULT; - + } + fd_install(retval, newfile); out: return retval; } @@@ -4668,9 -4669,10 +4684,10 @@@ static int sctp_copy_laddrs(struct soc union sctp_addr temp; int cnt = 0; int addrlen; + struct net *net = sock_net(sk);
rcu_read_lock(); - list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { + list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { if (!addr->valid) continue;
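The recurring sctp_<tunable> -> net->sctp.<tunable> substitutions in this file are the namespace conversion: every former SCTP global (auth_enable, addip_enable, the RTO and retransmission defaults, the local-address and auto-asconf lists, the port-hash keying) moves into per-net storage reached through sock_net(). The access pattern, as a sketch against this tree's struct netns_sctp:

        static bool sctp_auth_enabled(const struct sock *sk)
        {
                return sock_net(sk)->sctp.auth_enable != 0;
        }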
@@@ -5314,12 -5316,13 +5331,13 @@@ static int sctp_getsockopt_maxburst(str static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, char __user *optval, int __user *optlen) { + struct net *net = sock_net(sk); struct sctp_hmacalgo __user *p = (void __user *)optval; struct sctp_hmac_algo_param *hmacs; __u16 data_len = 0; u32 num_idents;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
hmacs = sctp_sk(sk)->ep->auth_hmacs_list; @@@ -5343,10 -5346,11 +5361,11 @@@ static int sctp_getsockopt_active_key(struct sock *sk, int len, char __user *optval, int __user *optlen) { + struct net *net = sock_net(sk); struct sctp_authkeyid val; struct sctp_association *asoc;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (len < sizeof(struct sctp_authkeyid)) @@@ -5375,6 -5379,7 +5394,7 @@@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { + struct net *net = sock_net(sk); struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; @@@ -5382,7 -5387,7 +5402,7 @@@ u32 num_chunks = 0; char __user *to;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (len < sizeof(struct sctp_authchunks)) @@@ -5418,6 -5423,7 +5438,7 @@@ num static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { + struct net *net = sock_net(sk); struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; @@@ -5425,7 -5431,7 +5446,7 @@@ u32 num_chunks = 0; char __user *to;
- if (!sctp_auth_enable) + if (!net->sctp.auth_enable) return -EACCES;
if (len < sizeof(struct sctp_authchunks)) @@@ -5784,7 -5790,7 +5805,7 @@@ static void sctp_unhash(struct sock *sk * a fastreuse flag (FIXME: NPI ipg). */ static struct sctp_bind_bucket *sctp_bucket_create( - struct sctp_bind_hashbucket *head, unsigned short snum); + struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { @@@ -5814,11 -5820,12 +5835,12 @@@ rover = low; if (inet_is_reserved_local_port(rover)) continue; - index = sctp_phashfn(rover); + index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; sctp_spin_lock(&head->lock); sctp_for_each_hentry(pp, node, &head->chain) - if (pp->port == rover) + if ((pp->port == rover) && + net_eq(sock_net(sk), pp->net)) goto next; break; next: @@@ -5842,10 -5849,10 +5864,10 @@@ * to the port number (snum) - we detect that with the * port iterator, pp being NULL. */ - head = &sctp_port_hashtable[sctp_phashfn(snum)]; + head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; sctp_spin_lock(&head->lock); sctp_for_each_hentry(pp, node, &head->chain) { - if (pp->port == snum) + if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; } } @@@ -5896,7 -5903,7 +5918,7 @@@ pp_found pp_not_found: /* If there was a hash table miss, create a new port. */ ret = 1; - if (!pp && !(pp = sctp_bucket_create(head, snum))) + if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only @@@ -6128,7 -6135,7 +6150,7 @@@ unsigned int sctp_poll(struct file *fil ********************************************************************/
static struct sctp_bind_bucket *sctp_bucket_create( - struct sctp_bind_hashbucket *head, unsigned short snum) + struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) { struct sctp_bind_bucket *pp;
@@@ -6138,6 -6145,7 +6160,7 @@@ pp->port = snum; pp->fastreuse = 0; INIT_HLIST_HEAD(&pp->owner); + pp->net = net; hlist_add_head(&pp->node, &head->chain); } return pp; @@@ -6157,7 -6165,8 +6180,8 @@@ static void sctp_bucket_destroy(struct static inline void __sctp_put_port(struct sock *sk) { struct sctp_bind_hashbucket *head = - &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)]; + &sctp_port_hashtable[sctp_phashfn(sock_net(sk), + inet_sk(sk)->inet_num)]; struct sctp_bind_bucket *pp;
sctp_spin_lock(&head->lock); @@@ -6824,7 -6833,8 +6848,8 @@@ static void sctp_sock_migrate(struct so newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */ - head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)]; + head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), + inet_sk(oldsk)->inet_num)]; sctp_local_bh_disable(); sctp_spin_lock(&head->lock); pp = sctp_sk(oldsk)->bind_hash; diff --combined net/socket.c index 38a1431,c641549..06c4dbe --- a/net/socket.c +++ b/net/socket.c @@@ -88,6 -88,7 +88,7 @@@ #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> + #include <linux/xattr.h>
#include <asm/uaccess.h> #include <asm/unistd.h> @@@ -346,15 -347,30 +347,23 @@@ static struct file_system_type sock_fs_ * but we take care of internal coherence yet. */
- struct file *sock_alloc_file(struct socket *sock, int flags) -static int sock_alloc_file(struct socket *sock, struct file **f, int flags, - const char *dname) ++struct file *sock_alloc_file(struct socket *sock, int flags, ++ const char *dname) { struct qstr name = { .name = "" }; struct path path; struct file *file; - int fd; - - fd = get_unused_fd_flags(flags); - if (unlikely(fd < 0)) - return fd;
+ if (dname) { + name.name = dname; + name.len = strlen(name.name); + } else if (sock->sk) { + name.name = sock->sk->sk_prot_creator->name; + name.len = strlen(name.name); + } path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); - if (unlikely(!path.dentry)) { - put_unused_fd(fd); - return -ENOMEM; - } + if (unlikely(!path.dentry)) + return ERR_PTR(-ENOMEM); path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock)); @@@ -366,33 -382,30 +375,33 @@@ /* drop dentry, keep inode */ ihold(path.dentry->d_inode); path_put(&path); - put_unused_fd(fd); - return -ENFILE; + return ERR_PTR(-ENFILE); }
sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->f_pos = 0; file->private_data = sock; - - *f = file; - return fd; + return file; } +EXPORT_SYMBOL(sock_alloc_file);
-int sock_map_fd(struct socket *sock, int flags) +static int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; - int fd = sock_alloc_file(sock, &newfile, flags, NULL); + int fd = get_unused_fd_flags(flags); + if (unlikely(fd < 0)) + return fd;
- newfile = sock_alloc_file(sock, flags); - if (likely(fd >= 0)) ++ newfile = sock_alloc_file(sock, flags, NULL); + if (likely(!IS_ERR(newfile))) { fd_install(fd, newfile); + return fd; + }
- return fd; + put_unused_fd(fd); + return PTR_ERR(newfile); } -EXPORT_SYMBOL(sock_map_fd);
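sock_alloc_file() now returns the struct file * directly (ERR_PTR-encoded on failure) and no longer consumes a descriptor, so callers all take the same three steps: reserve the fd, build the file, and fd_install() only once nothing can fail, because an installed fd is immediately visible to the process. Condensed from the new sock_map_fd() above (the helper name is illustrative):

        static int sock_fd_publish(struct socket *sock, int flags)
        {
                struct file *file;
                int fd = get_unused_fd_flags(flags);

                if (unlikely(fd < 0))
                        return fd;
                file = sock_alloc_file(sock, flags, NULL);
                if (IS_ERR(file)) {
                        put_unused_fd(fd);      /* fd was never visible */
                        return PTR_ERR(file);
                }
                fd_install(fd, file);           /* publish: fd is now live */
                return fd;
        }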
struct socket *sock_from_file(struct file *file, int *err) { @@@ -451,6 -464,68 +460,68 @@@ static struct socket *sockfd_lookup_lig return NULL; }
+ #define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname" + #define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX) + #define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1) + static ssize_t sockfs_getxattr(struct dentry *dentry, + const char *name, void *value, size_t size) + { + const char *proto_name; + size_t proto_size; + int error; + + error = -ENODATA; + if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) { + proto_name = dentry->d_name.name; + proto_size = strlen(proto_name); + + if (value) { + error = -ERANGE; + if (proto_size + 1 > size) + goto out; + + strncpy(value, proto_name, proto_size + 1); + } + error = proto_size + 1; + } + + out: + return error; + } + + static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer, + size_t size) + { + ssize_t len; + ssize_t used = 0; + + len = security_inode_listsecurity(dentry->d_inode, buffer, size); + if (len < 0) + return len; + used += len; + if (buffer) { + if (size < used) + return -ERANGE; + buffer += len; + } + + len = (XATTR_NAME_SOCKPROTONAME_LEN + 1); + used += len; + if (buffer) { + if (size < used) + return -ERANGE; + memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len); + buffer += len; + } + + return used; + } + + static const struct inode_operations sockfs_inode_ops = { + .getxattr = sockfs_getxattr, + .listxattr = sockfs_listxattr, + }; + /** * sock_alloc - allocate a socket * @@@ -475,6 -550,7 +546,7 @@@ static struct socket *sock_alloc(void inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); + inode->i_op = &sockfs_inode_ops;
this_cpu_add(sockets_in_use, 1); return sock; @@@ -1390,32 -1466,17 +1462,32 @@@ SYSCALL_DEFINE4(socketpair, int, family if (err < 0) goto out_release_both;
- fd1 = sock_alloc_file(sock1, &newfile1, flags, NULL); + fd1 = get_unused_fd_flags(flags); if (unlikely(fd1 < 0)) { err = fd1; goto out_release_both; } - - fd2 = sock_alloc_file(sock2, &newfile2, flags, NULL); + fd2 = get_unused_fd_flags(flags); if (unlikely(fd2 < 0)) { err = fd2; + put_unused_fd(fd1); + goto out_release_both; + } + - newfile1 = sock_alloc_file(sock1, flags); ++ newfile1 = sock_alloc_file(sock1, flags, NULL); + if (unlikely(IS_ERR(newfile1))) { + err = PTR_ERR(newfile1); + put_unused_fd(fd1); + put_unused_fd(fd2); + goto out_release_both; + } + - newfile2 = sock_alloc_file(sock2, flags); ++ newfile2 = sock_alloc_file(sock2, flags, NULL); + if (IS_ERR(newfile2)) { + err = PTR_ERR(newfile2); fput(newfile1); put_unused_fd(fd1); + put_unused_fd(fd2); sock_release(sock2); goto out; } @@@ -1547,19 -1608,13 +1619,20 @@@ SYSCALL_DEFINE4(accept4, int, fd, struc */ __module_get(newsock->ops->owner);
- newfd = sock_alloc_file(newsock, &newfile, flags, - sock->sk->sk_prot_creator->name); + newfd = get_unused_fd_flags(flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } - newfile = sock_alloc_file(newsock, flags); ++ newfile = sock_alloc_file(newsock, flags, ++ sock->sk->sk_prot_creator->name); + if (unlikely(IS_ERR(newfile))) { + err = PTR_ERR(newfile); + put_unused_fd(newfd); + sock_release(newsock); + goto out_put; + }
err = security_socket_accept(sock, newsock); if (err) diff --combined net/wireless/reg.c index 72d170c,1ad04e5..4de18ae --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@@ -350,9 -350,6 +350,9 @@@ static void reg_regdb_search(struct wor struct reg_regdb_search_request *request; const struct ieee80211_regdomain *curdom, *regdom; int i, r; + bool set_reg = false; + + mutex_lock(&cfg80211_mutex);
mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { @@@ -368,7 -365,9 +368,7 @@@ r = reg_copy_regd(®dom, curdom); if (r) break; - mutex_lock(&cfg80211_mutex); - set_regdom(regdom); - mutex_unlock(&cfg80211_mutex); + set_reg = true; break; } } @@@ -376,11 -375,6 +376,11 @@@ kfree(request); } mutex_unlock(®_regdb_search_mutex); + + if (set_reg) + set_regdom(regdom); + + mutex_unlock(&cfg80211_mutex); }
static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@@ -1955,8 -1949,7 +1955,7 @@@ static void restore_regulatory_settings if (reg_request->initiator != NL80211_REGDOM_SET_BY_USER) continue; - list_del(®_request->list); - list_add_tail(®_request->list, &tmp_reg_req_list); + list_move_tail(®_request->list, &tmp_reg_req_list); } } spin_unlock(®_requests_lock); @@@ -2015,8 -2008,7 +2014,7 @@@ "into the queue\n", reg_request->alpha2[0], reg_request->alpha2[1]); - list_del(®_request->list); - list_add_tail(®_request->list, ®_requests_list); + list_move_tail(®_request->list, ®_requests_list); } spin_unlock(®_requests_lock);
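Two small cleanups above: reg_regdb_search() now takes cfg80211_mutex before reg_regdb_search_mutex and defers set_regdom() until the inner lock is dropped, giving one consistent lock order; and the open-coded list_del() plus list_add_tail() pairs collapse into list_move_tail(), which performs exactly that sequence (req and dst below are illustrative):

        /* These two forms are equivalent; the second is idiomatic. */
        list_del(&req->list);
        list_add_tail(&req->list, &dst);

        list_move_tail(&req->list, &dst);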
diff --combined net/xfrm/xfrm_policy.c index 387848e,741a32a..f4e0a6a --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@@ -42,13 -42,12 +42,12 @@@ static DEFINE_SPINLOCK(xfrm_policy_sk_b static struct dst_entry *xfrm_policy_sk_bundles; static DEFINE_RWLOCK(xfrm_policy_lock);
- static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); - static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; + static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); + static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] + __read_mostly;
static struct kmem_cache *xfrm_dst_cache __read_mostly;
- static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); - static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); static void xfrm_init_pmtu(struct dst_entry *dst); static int stale_bundle(struct dst_entry *dst); static int xfrm_bundle_ok(struct xfrm_dst *xdst); @@@ -95,6 -94,24 +94,24 @@@ bool xfrm_selector_match(const struct x return false; }
+ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) + { + struct xfrm_policy_afinfo *afinfo; + + if (unlikely(family >= NPROTO)) + return NULL; + rcu_read_lock(); + afinfo = rcu_dereference(xfrm_policy_afinfo[family]); + if (unlikely(!afinfo)) + rcu_read_unlock(); + return afinfo; + } + + static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) + { + rcu_read_unlock(); + } + static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, const xfrm_address_t *saddr, const xfrm_address_t *daddr, @@@ -585,7 -602,6 +602,7 @@@ int xfrm_policy_insert(int dir, struct xfrm_pol_hold(policy); net->xfrm.policy_count[dir]++; atomic_inc(&flow_cache_genid); + rt_genid_bump(net); if (delpol) __xfrm_policy_unlink(delpol, dir); policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); @@@ -1764,7 -1780,7 +1781,7 @@@ static struct dst_entry *make_blackhole
if (!afinfo) { dst_release(dst_orig); - ret = ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); } else { ret = afinfo->blackhole_route(net, dst_orig); } @@@ -2421,7 -2437,7 +2438,7 @@@ int xfrm_policy_register_afinfo(struct return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; - write_lock_bh(&xfrm_policy_afinfo_lock); + spin_lock(&xfrm_policy_afinfo_lock); if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) err = -ENOBUFS; else { @@@ -2442,9 -2458,9 +2459,9 @@@ dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(afinfo->garbage_collect == NULL)) afinfo->garbage_collect = xfrm_garbage_collect_deferred; - xfrm_policy_afinfo[afinfo->family] = afinfo; + rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); } - write_unlock_bh(&xfrm_policy_afinfo_lock); + spin_unlock(&xfrm_policy_afinfo_lock);
rtnl_lock(); for_each_net(net) { @@@ -2477,21 -2493,26 +2494,26 @@@ int xfrm_policy_unregister_afinfo(struc return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; - write_lock_bh(&xfrm_policy_afinfo_lock); + spin_lock(&xfrm_policy_afinfo_lock); if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) err = -EINVAL; - else { - struct dst_ops *dst_ops = afinfo->dst_ops; - xfrm_policy_afinfo[afinfo->family] = NULL; - dst_ops->kmem_cachep = NULL; - dst_ops->check = NULL; - dst_ops->negative_advice = NULL; - dst_ops->link_failure = NULL; - afinfo->garbage_collect = NULL; - } + else + RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], + NULL); + } + spin_unlock(&xfrm_policy_afinfo_lock); + if (!err) { + struct dst_ops *dst_ops = afinfo->dst_ops; + + synchronize_rcu(); + + dst_ops->kmem_cachep = NULL; + dst_ops->check = NULL; + dst_ops->negative_advice = NULL; + dst_ops->link_failure = NULL; + afinfo->garbage_collect = NULL; } - write_unlock_bh(&xfrm_policy_afinfo_lock); return err; } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); @@@ -2500,33 -2521,16 +2522,16 @@@ static void __net_init xfrm_dst_ops_ini { struct xfrm_policy_afinfo *afinfo;
- read_lock_bh(&xfrm_policy_afinfo_lock); - afinfo = xfrm_policy_afinfo[AF_INET]; + rcu_read_lock(); + afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]); if (afinfo) net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; #if IS_ENABLED(CONFIG_IPV6) - afinfo = xfrm_policy_afinfo[AF_INET6]; + afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]); if (afinfo) net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; #endif - read_unlock_bh(&xfrm_policy_afinfo_lock); - } - - static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) - { - struct xfrm_policy_afinfo *afinfo; - if (unlikely(family >= NPROTO)) - return NULL; - read_lock(&xfrm_policy_afinfo_lock); - afinfo = xfrm_policy_afinfo[family]; - if (unlikely(!afinfo)) - read_unlock(&xfrm_policy_afinfo_lock); - return afinfo; - } - - static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) - { - read_unlock(&xfrm_policy_afinfo_lock); + rcu_read_unlock(); }
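The xfrm_policy_afinfo[] conversion is the standard RCU pointer-table pattern: readers pair rcu_read_lock() with rcu_dereference(), and the get/put helpers above deliberately keep the read-side critical section open across the use; the writer serializes on a plain spinlock, publishes with rcu_assign_pointer(), and waits out readers with synchronize_rcu() before tearing down anything the old pointer reached. A generic sketch with illustrative names:

        static struct foo __rcu *table[NPROTO];
        static DEFINE_SPINLOCK(table_lock);

        /* Reader side: no lock, just an RCU critical section. */
        static struct foo *foo_get(int i)
        {
                struct foo *f;

                rcu_read_lock();
                f = rcu_dereference(table[i]);
                if (!f)
                        rcu_read_unlock();      /* else caller does foo_put() */
                return f;
        }

        static void foo_put(struct foo *f)
        {
                rcu_read_unlock();
        }

        /* Writer side: unpublish, wait for readers, then free. */
        static void foo_retire(int i, struct foo *old)
        {
                spin_lock(&table_lock);
                RCU_INIT_POINTER(table[i], NULL);
                spin_unlock(&table_lock);
                synchronize_rcu();              /* no reader still sees old */
                /* now safe to tear down *old */
        }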
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) diff --combined net/xfrm/xfrm_user.c index 289f4bf,5d6eb4b..94a2a1f --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@@ -123,21 -123,9 +123,21 @@@ static inline int verify_replay(struct struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; + struct xfrm_replay_state_esn *rs;
- if ((p->flags & XFRM_STATE_ESN) && !rt) - return -EINVAL; + if (p->flags & XFRM_STATE_ESN) { + if (!rt) + return -EINVAL; + + rs = nla_data(rt); + + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) + return -EINVAL; + + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && + nla_len(rt) != sizeof(*rs)) + return -EINVAL; + }
if (!rt) return 0; @@@ -382,15 -370,14 +382,15 @@@ static inline int xfrm_replay_verify_le struct nlattr *rp) { struct xfrm_replay_state_esn *up; + int ulen;
if (!replay_esn || !rp) return 0;
up = nla_data(rp); + ulen = xfrm_replay_state_esn_len(up);
- if (xfrm_replay_state_esn_len(replay_esn) != - xfrm_replay_state_esn_len(up)) + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) return -EINVAL;
return 0; @@@ -401,28 -388,22 +401,28 @@@ static int xfrm_alloc_replay_state_esn( struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; + int klen, ulen;
if (!rta) return 0;
up = nla_data(rta); + klen = xfrm_replay_state_esn_len(up); + ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
- p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + p = kzalloc(klen, GFP_KERNEL); if (!p) return -ENOMEM;
- pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + pp = kzalloc(klen, GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; }
+ memcpy(p, up, ulen); + memcpy(pp, up, ulen); + *replay_esn = p; *preplay_esn = pp;
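The rewritten verify_replay() and xfrm_alloc_replay_state_esn() stop trusting the user-supplied XFRMA_REPLAY_ESN_VAL attribute: bmp_len is bounded, the attribute must cover the structure its own bmp_len implies (or be exactly the bare struct), and the kernel copies are zero-allocated at the full length so any tail the attribute did not supply stays zeroed instead of stale. The invariant, restated as an illustrative check:

        static int esn_nla_ok(const struct nlattr *nla)
        {
                const struct xfrm_replay_state_esn *rs = nla_data(nla);
                size_t full = sizeof(*rs) + rs->bmp_len * sizeof(rs->bmp[0]);

                if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
                        return -EINVAL;
                if (nla_len(nla) != sizeof(*rs) && nla_len(nla) < full)
                        return -EINVAL;
                return 0;
        }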
@@@ -461,11 -442,10 +461,11 @@@ static void copy_from_user_state(struc * somehow made shareable and move it to xfrm_state.c - JHS * */ -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, + int update_esn) { struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; @@@ -575,7 -555,7 +575,7 @@@ static struct xfrm_state *xfrm_state_co goto error;
/* override default values from above */ - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 0);
return x;
@@@ -623,7 -603,7 +623,7 @@@ static int xfrm_add_sa(struct sk_buff * }
c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type;
km_state_notify(x, &c); @@@ -696,7 -676,7 +696,7 @@@ static int xfrm_del_sa(struct sk_buff * goto out;
c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type; km_state_notify(x, &c);
@@@ -709,7 -689,6 +709,7 @@@ out
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { + memset(p, 0, sizeof(*p)); memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->lft, &x->lft, sizeof(p->lft)); @@@ -763,7 -742,7 +763,7 @@@ static int copy_to_user_auth(struct xfr return -EMSGSIZE;
algo = nla_data(nla); - strcpy(algo->alg_name, auth->alg_name); + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len;
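The strcpy() to strncpy() change above, together with the memset() additions to copy_to_user_state(), copy_to_user_policy() and copy_to_user_tmpl() elsewhere in this diff, hardens the netlink dump path in one consistent way: structures destined for userspace are fully initialized before the fields are filled, so struct padding and unused tail bytes cannot leak kernel memory, and string copies are bounded by the destination field. In brief:

        /* Copy-out hygiene (sketch of the pattern used above): */
        memset(p, 0, sizeof(*p));               /* no stale padding escapes */
        memcpy(&p->id, &x->id, sizeof(p->id));  /* then fill known fields */
        strncpy(algo->alg_name, auth->alg_name,
                sizeof(algo->alg_name));        /* bounded; drops the NUL if
                                                   the name fills the field */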
@@@ -847,7 -826,7 +847,7 @@@ static int dump_one_state(struct xfrm_s struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; @@@ -899,7 -878,6 +899,7 @@@ static struct sk_buff *xfrm_state_netli { struct xfrm_dump_info info; struct sk_buff *skb; + int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) @@@ -910,10 -888,9 +910,10 @@@ info.nlmsg_seq = seq; info.nlmsg_flags = 0;
- if (dump_one_state(x, 0, &info)) { + err = dump_one_state(x, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); }
return skb; @@@ -927,7 -904,7 +927,7 @@@ static inline size_t xfrm_spdinfo_msgsi }
static int build_spdinfo(struct sk_buff *skb, struct net *net, - u32 pid, u32 seq, u32 flags) + u32 portid, u32 seq, u32 flags) { struct xfrmk_spdinfo si; struct xfrmu_spdinfo spc; @@@ -936,7 -913,7 +936,7 @@@ int err; u32 *f;
- nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); + nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... */ return -EMSGSIZE;
@@@ -969,17 -946,17 +969,17 @@@ static int xfrm_get_spdinfo(struct sk_b struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); - u32 spid = NETLINK_CB(skb).pid; + u32 sportid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM;
- if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) + if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0) BUG();
- return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); + return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid); }
static inline size_t xfrm_sadinfo_msgsize(void) @@@ -990,7 -967,7 +990,7 @@@ }
static int build_sadinfo(struct sk_buff *skb, struct net *net, - u32 pid, u32 seq, u32 flags) + u32 portid, u32 seq, u32 flags) { struct xfrmk_sadinfo si; struct xfrmu_sadhinfo sh; @@@ -998,7 -975,7 +998,7 @@@ int err; u32 *f;
- nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); + nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... */ return -EMSGSIZE;
@@@ -1026,17 -1003,17 +1026,17 @@@ static int xfrm_get_sadinfo(struct sk_b struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); - u32 spid = NETLINK_CB(skb).pid; + u32 sportid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq;
r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM;
- if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) + if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0) BUG();
- return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); + return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid); }
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, @@@ -1056,7 -1033,7 +1056,7 @@@ if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { - err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); + err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid); } xfrm_state_put(x); out_noput: @@@ -1137,7 -1114,7 +1137,7 @@@ static int xfrm_alloc_userspi(struct sk goto out; }
- err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid); + err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out: xfrm_state_put(x); @@@ -1340,7 -1317,6 +1340,7 @@@ static void copy_from_user_policy(struc
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) { + memset(p, 0, sizeof(*p)); memcpy(&p->sel, &xp->selector, sizeof(p->sel)); memcpy(&p->lft, &xp->lft, sizeof(p->lft)); memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); @@@ -1425,7 -1401,7 +1425,7 @@@ static int xfrm_add_policy(struct sk_bu
c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c);
xfrm_pol_put(xp); @@@ -1445,7 -1421,6 +1445,7 @@@ static int copy_to_user_tmpl(struct xfr struct xfrm_user_tmpl *up = &vec[i]; struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+ memset(up, 0, sizeof(*up)); memcpy(&up->id, &kp->id, sizeof(up->id)); up->family = kp->encap_family; memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); @@@ -1511,7 -1486,7 +1511,7 @@@ static int dump_one_policy(struct xfrm_ struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; @@@ -1571,7 -1546,6 +1571,7 @@@ static struct sk_buff *xfrm_policy_netl { struct xfrm_dump_info info; struct sk_buff *skb; + int err;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) @@@ -1582,10 -1556,9 +1582,10 @@@ info.nlmsg_seq = seq; info.nlmsg_flags = 0;
- if (dump_one_policy(xp, dir, 0, &info) < 0) { + err = dump_one_policy(xp, dir, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); }
return skb; @@@ -1648,7 -1621,7 +1648,7 @@@ static int xfrm_get_policy(struct sk_bu err = PTR_ERR(resp_skb); } else { err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, - NETLINK_CB(skb).pid); + NETLINK_CB(skb).portid); } } else { uid_t loginuid = audit_get_loginuid(current); @@@ -1665,7 -1638,7 +1665,7 @@@ c.data.byid = p->index; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c); }
@@@ -1695,7 -1668,7 +1695,7 @@@ static int xfrm_flush_sa(struct sk_buf c.data.proto = p->proto; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.net = net; km_state_notify(NULL, &c);
@@@ -1722,7 -1695,7 +1722,7 @@@ static int build_aevent(struct sk_buff struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); + nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); if (nlh == NULL) return -EMSGSIZE;
@@@ -1804,11 -1777,11 +1804,11 @@@ static int xfrm_get_ae(struct sk_buff * spin_lock_bh(&x->lock); c.data.aevent = p->flags; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid;
if (build_aevent(r_skb, x, &c) < 0) BUG(); - err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid); + err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid); spin_unlock_bh(&x->lock); xfrm_state_put(x); return err; @@@ -1849,12 -1822,12 +1849,12 @@@ static int xfrm_new_ae(struct sk_buff * goto out;
spin_lock_bh(&x->lock); - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 1); spin_unlock_bh(&x->lock);
c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.data.aevent = XFRM_AE_CU; km_state_notify(x, &c); err = 0; @@@ -1889,7 -1862,7 +1889,7 @@@ static int xfrm_flush_policy(struct sk_ c.data.type = type; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; - c.pid = nlh->nlmsg_pid; + c.portid = nlh->nlmsg_pid; c.net = net; km_policy_notify(NULL, 0, &c); return 0; @@@ -1957,7 -1930,7 +1957,7 @@@ static int xfrm_add_pol_expire(struct s // reset the timers here? WARN(1, "Don't know what to do with soft policy expire\n"); } - km_policy_expired(xp, p->dir, up->hard, current->pid); + km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
out: xfrm_pol_put(xp); @@@ -1985,7 -1958,7 +1985,7 @@@ static int xfrm_add_sa_expire(struct sk err = -EINVAL; if (x->km.state != XFRM_STATE_VALID) goto out; - km_state_expired(x, ue->hard, current->pid); + km_state_expired(x, ue->hard, nlh->nlmsg_pid);
if (ue->hard) { uid_t loginuid = audit_get_loginuid(current); @@@ -2397,7 -2370,7 +2397,7 @@@ static int build_expire(struct sk_buff struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); + nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); if (nlh == NULL) return -EMSGSIZE;
@@@ -2456,7 -2429,7 +2456,7 @@@ static int xfrm_notify_sa_flush(const s if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); + nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); if (nlh == NULL) { kfree_skb(skb); return -EMSGSIZE; @@@ -2524,7 -2497,7 +2524,7 @@@ static int xfrm_notify_sa(struct xfrm_s if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); + nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; @@@ -2594,8 -2567,7 +2594,7 @@@ static inline size_t xfrm_acquire_msgsi }
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, - struct xfrm_tmpl *xt, struct xfrm_policy *xp, - int dir) + struct xfrm_tmpl *xt, struct xfrm_policy *xp) { __u32 seq = xfrm_get_acqseq(); struct xfrm_user_acquire *ua; @@@ -2610,7 -2582,7 +2609,7 @@@ memcpy(&ua->id, &x->id, sizeof(ua->id)); memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); - copy_to_user_policy(xp, &ua->policy, dir); + copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT); ua->aalgos = xt->aalgos; ua->ealgos = xt->ealgos; ua->calgos = xt->calgos; @@@ -2632,7 -2604,7 +2631,7 @@@ }
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, - struct xfrm_policy *xp, int dir) + struct xfrm_policy *xp) { struct net *net = xs_net(x); struct sk_buff *skb; @@@ -2641,7 -2613,7 +2640,7 @@@ if (skb == NULL) return -ENOMEM;
- if (build_acquire(skb, x, xt, xp, dir) < 0) + if (build_acquire(skb, x, xt, xp) < 0) BUG();
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); @@@ -2724,7 -2696,7 +2723,7 @@@ static int build_polexpire(struct sk_bu struct nlmsghdr *nlh; int err;
- nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); + nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); if (nlh == NULL) return -EMSGSIZE;
@@@ -2784,7 -2756,7 +2783,7 @@@ static int xfrm_notify_policy(struct xf if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); + nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; @@@ -2838,7 -2810,7 +2837,7 @@@ static int xfrm_notify_policy_flush(con if (skb == NULL) return -ENOMEM;
- nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); + nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; @@@ -2991,7 -2963,7 +2990,7 @@@ static int __net_init xfrm_user_net_ini .input = xfrm_netlink_rcv, };
- nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg); + nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg); if (nlsk == NULL) return -ENOMEM; net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */