The following commit has been merged in the master branch:

commit 5adc86b9b44c7c588cfa60eeccd26092cf2ed224
Merge: ca5e93e7b9f629b325aea5ebc5dfdd77cb085009 c1cb0d3b569b1d21ada8666efb42dfd437617a09
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Mon Apr 22 13:25:57 2013 +1000
    20130419/net-next
Conflicts:
	drivers/infiniband/hw/cxgb4/qp.c
	drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
	include/net/scm.h
	net/batman-adv/routing.c
	net/ipv4/tcp_input.c
diff --combined MAINTAINERS index 8b1ee30,c39bdc3..5c9d62d --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -1764,7 -1764,7 +1764,7 @@@ F: arch/arm/configs/bcm2835_defconfi F: drivers/*/*bcm2835*
BROADCOM TG3 GIGABIT ETHERNET DRIVER - M: Matt Carlson mcarlson@broadcom.com + M: Nithin Nayak Sujir nsujir@broadcom.com M: Michael Chan mchan@broadcom.com L: netdev@vger.kernel.org S: Supported @@@ -2206,17 -2206,6 +2206,17 @@@ S: Maintaine F: drivers/cpufreq/ F: include/linux/cpufreq.h
+CPU FREQUENCY DRIVERS - ARM BIG LITTLE +M: Viresh Kumar viresh.kumar@linaro.org +M: Sudeep KarkadaNagesha sudeep.karkadanagesha@arm.com +L: cpufreq@vger.kernel.org +L: linux-pm@vger.kernel.org +W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php +S: Maintained +F: drivers/cpufreq/arm_big_little.h +F: drivers/cpufreq/arm_big_little.c +F: drivers/cpufreq/arm_big_little_dt.c + CPUID/MSR DRIVER M: "H. Peter Anvin" hpa@zytor.com S: Maintained @@@ -2295,7 -2284,7 +2295,7 @@@ L: linux-media@vger.kernel.or T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org S: Maintained -F: drivers/media/i2c/cx2341x* +F: drivers/media/common/cx2341x* F: include/media/cx2341x*
CX88 VIDEO4LINUX DRIVER @@@ -2378,16 -2367,6 +2378,16 @@@ W: http://www.cyclades.com S: Orphan F: drivers/net/wan/pc300*
+CYPRESS_FIRMWARE MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/common/cypress_firmware* + CYTTSP TOUCHSCREEN DRIVER M: Javier Martinez Canillas javier@dowhile0.org L: linux-input@vger.kernel.org @@@ -2752,7 -2731,7 +2752,7 @@@ T: git git://linuxtv.org/media_tree.gi S: Maintained F: drivers/media/usb/dvb-usb/cxusb*
-DVB_USB_CYPRESS_FIRMWARE MEDIA DRIVER +DVB_USB_EC168 MEDIA DRIVER M: Antti Palosaari crope@iki.fi L: linux-media@vger.kernel.org W: http://linuxtv.org/ @@@ -2760,16 -2739,17 +2760,16 @@@ W: http://palosaari.fi/linux Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained -F: drivers/media/usb/dvb-usb-v2/cypress_firmware* +F: drivers/media/usb/dvb-usb-v2/ec168*
-DVB_USB_EC168 MEDIA DRIVER +DVB_USB_GL861 MEDIA DRIVER M: Antti Palosaari crope@iki.fi L: linux-media@vger.kernel.org W: http://linuxtv.org/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained -F: drivers/media/usb/dvb-usb-v2/ec168* +F: drivers/media/usb/dvb-usb-v2/gl861*
DVB_USB_MXL111SF MEDIA DRIVER M: Michael Krufky mkrufky@linuxtv.org @@@ -3613,14 -3593,6 +3613,14 @@@ W: http://www.kernel.org/pub/linux/kern S: Maintained F: drivers/platform/x86/hdaps.c
+HDPVR USB VIDEO ENCODER DRIVER +M: Hans Verkuil hverkuil@xs4all.nl +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Odd Fixes +F: drivers/media/usb/hdpvr + HWPOISON MEMORY FAILURE HANDLING M: Andi Kleen andi@firstfloor.org L: linux-mm@kvack.org @@@ -3892,6 -3864,7 +3892,6 @@@ F: drivers/i2c/i2c-stub.
I2C SUBSYSTEM M: Wolfram Sang wsa@the-dreams.de -M: "Ben Dooks (embedded platforms)" ben-linux@fluff.org L: linux-i2c@vger.kernel.org W: http://i2c.wiki.kernel.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git @@@ -4449,16 -4422,6 +4449,16 @@@ Q: http://patchwork.linuxtv.org/project S: Maintained F: drivers/media/dvb-frontends/it913x-fe*
+IT913X MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/tuners/it913x* + IVTV VIDEO4LINUX DRIVER M: Andy Walls awalls@md.metrocast.net L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers) @@@ -4978,12 -4941,6 +4978,12 @@@ W: logfs.or S: Maintained F: fs/logfs/
+LPC32XX MACHINE SUPPORT +M: Roland Stigge stigge@antcom.de +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-lpc32xx/ + LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) M: Nagalakshmi Nandigama Nagalakshmi.Nandigama@lsi.com M: Sreekanth Reddy Sreekanth.Reddy@lsi.com @@@ -5449,13 -5406,6 +5449,13 @@@ L: linux-scsi@vger.kernel.or S: Maintained F: drivers/scsi/NCR_D700.*
+NCT6775 HARDWARE MONITOR DRIVER +M: Guenter Roeck linux@roeck-us.net +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/nct6775 +F: drivers/hwmon/nct6775.c + NETEFFECT IWARP RNIC DRIVER (IW_NES) M: Faisal Latif faisal.latif@intel.com L: linux-rdma@vger.kernel.org @@@ -6242,7 -6192,7 +6242,7 @@@ S: Supporte F: drivers/scsi/pmcraid.*
PMC SIERRA PM8001 DRIVER -M: jack_wang@usish.com +M: xjtuwjp@gmail.com M: lindar_liu@usish.com L: linux-scsi@vger.kernel.org S: Supported @@@ -6380,6 -6330,7 +6380,7 @@@ F: drivers/acpi/apei/erst.
PTP HARDWARE CLOCK SUPPORT M: Richard Cochran richardcochran@gmail.com + L: netdev@vger.kernel.org S: Maintained W: http://linuxptp.sourceforge.net/ F: Documentation/ABI/testing/sysfs-ptp @@@ -6511,6 -6462,7 +6512,7 @@@ S: Supporte F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER + M: Shahed Shaikh shahed.shaikh@qlogic.com M: Jitendra Kalsaria jitendra.kalsaria@qlogic.com M: Ron Mercer ron.mercer@qlogic.com M: linux-driver@qlogic.com @@@ -6675,7 -6627,7 +6677,7 @@@ S: Supporte F: fs/reiserfs/
REGISTER MAP ABSTRACTION -M: Mark Brown broonie@opensource.wolfsonmicro.com +M: Mark Brown broonie@kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git S: Supported F: drivers/base/regmap/ @@@ -6736,16 -6688,6 +6738,16 @@@ T: git git://linuxtv.org/anttip/media_t S: Maintained F: drivers/media/dvb-frontends/rtl2830*
+RTL2832 MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/dvb-frontends/rtl2832* + RTL8180 WIRELESS DRIVER M: "John W. Linville" linville@tuxdriver.com L: linux-wireless@vger.kernel.org @@@ -6848,7 -6790,7 +6850,7 @@@ L: linux-media@vger.kernel.or W: http://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes -F: Documentation/video4linux/saa7134/ +F: Documentation/video4linux/*.saa7134 F: drivers/media/pci/saa7134/
SAA7146 VIDEO4LINUX-2 DRIVER @@@ -6941,8 -6883,9 +6943,8 @@@ F: drivers/clocksourc
TLG2300 VIDEO4LINUX-2 DRIVER M: Huang Shijie shijie8@gmail.com -M: Kang Yong kangyong@telegent.com -M: Zhang Xiaobing xbzhang@telegent.com -S: Supported +M: Hans Verkuil hverkuil@xs4all.nl +S: Odd Fixes F: drivers/media/usb/tlg2300
SC1200 WDT DRIVER @@@ -7188,43 -7131,17 +7190,43 @@@ F: drivers/media/radio/si470x/radio-si4 F: drivers/media/radio/si470x/radio-si470x.h F: drivers/media/radio/si470x/radio-si470x-usb.c
+SI4713 FM RADIO TRANSMITTER I2C DRIVER +M: Eduardo Valentin edubezval@gmail.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Odd Fixes +F: drivers/media/radio/si4713-i2c.? + +SI4713 FM RADIO TRANSMITTER PLATFORM DRIVER +M: Eduardo Valentin edubezval@gmail.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Odd Fixes +F: drivers/media/radio/radio-si4713.h + +SIANO DVB DRIVER +M: Mauro Carvalho Chehab mchehab@redhat.com +L: linux-media@vger.kernel.org +W: http://linuxtv.org +T: git git://linuxtv.org/media_tree.git +S: Odd fixes +F: drivers/media/common/siano/ +F: drivers/media/dvb/siano/ +F: drivers/media/usb/siano/ +F: drivers/media/mmc/siano + SH_VEU V4L2 MEM2MEM DRIVER M: Guennadi Liakhovetski g.liakhovetski@gmx.de L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/sh_veu.c -F: include/media/sh_veu.h
SH_VOU V4L2 OUTPUT DRIVER M: Guennadi Liakhovetski g.liakhovetski@gmx.de L: linux-media@vger.kernel.org -S: Maintained +S: Odd Fixes F: drivers/media/platform/sh_vou.c F: include/media/sh_vou.h
@@@ -7266,13 -7183,14 +7268,13 @@@ F: arch/arm/mach-davinc F: drivers/i2c/busses/i2c-davinci.c
TI DAVINCI SERIES MEDIA DRIVER -M: Manjunath Hadli manjunath.hadli@ti.com -M: Prabhakar Lad prabhakar.lad@ti.com +M: Lad, Prabhakar prabhakar.csengg@gmail.com L: linux-media@vger.kernel.org L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) W: http://linuxtv.org/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git -S: Supported +S: Maintained F: drivers/media/platform/davinci/ F: include/media/davinci/
@@@ -7457,7 -7375,7 +7459,7 @@@ F: sound
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) M: Liam Girdwood lgirdwood@gmail.com -M: Mark Brown broonie@opensource.wolfsonmicro.com +M: Mark Brown broonie@kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://alsa-project.org/main/index.php/ASoC @@@ -7546,7 -7464,7 +7548,7 @@@ F: drivers/clk/spear
SPI SUBSYSTEM M: Grant Likely grant.likely@secretlab.ca -M: Mark Brown broonie@opensource.wolfsonmicro.com +M: Mark Brown broonie@kernel.org L: spi-devel-general@lists.sourceforge.net Q: http://patchwork.kernel.org/project/spi-devel-general/list/ T: git git://git.secretlab.ca/git/linux-2.6.git @@@ -7644,11 -7562,6 +7646,11 @@@ M: David Täht <d@teklibre.com S: Odd Fixes F: drivers/staging/frontier/
+STAGING - GO7007 MPEG CODEC +M: Hans Verkuil hans.verkuil@cisco.com +S: Maintained +F: drivers/staging/media/go7007/ + STAGING - INDUSTRIAL IO M: Jonathan Cameron jic23@cam.ac.uk L: linux-iio@vger.kernel.org @@@ -7699,8 -7612,8 +7701,8 @@@ S: Odd Fixe F: drivers/staging/sm7xxfb/
STAGING - SOFTLOGIC 6x10 MPEG CODEC -M: Ben Collins bcollins@bluecherry.net -S: Odd Fixes +M: Ismael Luceno ismael.luceno@corp.bluecherry.net +S: Supported F: drivers/staging/media/solo6x10/
STAGING - SPEAKUP CONSOLE SPEECH DRIVER @@@ -8603,7 -8516,7 +8605,7 @@@ F: drivers/usb/gadget/*uvc*. F: drivers/usb/gadget/webcam.c
USB WIRELESS RNDIS DRIVER (rndis_wlan) - M: Jussi Kivilinna jussi.kivilinna@mbnet.fi + M: Jussi Kivilinna jussi.kivilinna@iki.fi L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/rndis_wlan.c @@@ -8796,7 -8709,7 +8798,7 @@@ F: drivers/scsi/vmw_pvscsi.
VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood lrg@ti.com -M: Mark Brown broonie@opensource.wolfsonmicro.com +M: Mark Brown broonie@kernel.org W: http://opensource.wolfsonmicro.com/node/15 W: http://www.slimlogic.co.uk/?p=48 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c index 31dd2a7,554b906..b6e049a --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@@ -730,7 -730,8 +730,8 @@@ static int ipoib_start_xmit(struct sk_b if ((header->proto != htons(ETH_P_IP)) && (header->proto != htons(ETH_P_IPV6)) && (header->proto != htons(ETH_P_ARP)) && - (header->proto != htons(ETH_P_RARP))) { + (header->proto != htons(ETH_P_RARP)) && + (header->proto != htons(ETH_P_TIPC))) { /* ethertype not supported by IPoIB */ ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); @@@ -751,6 -752,7 +752,7 @@@ switch (header->proto) { case htons(ETH_P_IP): case htons(ETH_P_IPV6): + case htons(ETH_P_TIPC): neigh = ipoib_neigh_get(dev, cb->hwaddr); if (unlikely(!neigh)) { neigh_add_path(skb, cb->hwaddr, dev); @@@ -828,7 -830,7 +830,7 @@@ static int ipoib_hard_header(struct sk_ */ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
- return 0; + return sizeof *header; }
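The return-value change in ipoib_hard_header() above follows the header_ops->create convention: on success the callback reports the number of header bytes it pushed rather than 0 (eth_header() returns ETH_HLEN the same way), so callers going through dev_hard_header() can learn how much was prepended. A minimal sketch of that convention — example_header and example_create_header are hypothetical names, not from this patch:

struct example_header {
	__be16 proto;
};

static int example_create_header(struct sk_buff *skb, struct net_device *dev,
				 unsigned short type, const void *daddr,
				 const void *saddr, unsigned int len)
{
	struct example_header *header;

	header = (struct example_header *)skb_push(skb, sizeof(*header));
	header->proto = htons(type);
	/* ... record addressing for the transmit path ... */

	/* On success, report the number of bytes pushed, not 0. */
	return sizeof(*header);
}
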
static void ipoib_set_mcast_list(struct net_device *dev) diff --combined drivers/media/dvb-core/dvb_net.c index e17cb85,83a23af..c3cc3b5 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@@ -185,7 -185,7 +185,7 @@@ static __be16 dvb_net_eth_type_trans(st skb->pkt_type=PACKET_MULTICAST; }
- if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) return eth->h_proto;
rawp = skb->data; @@@ -228,9 -228,9 +228,9 @@@ static int ule_test_sndu( struct dvb_ne static int ule_bridged_sndu( struct dvb_net_priv *p ) { struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr; - if(ntohs(hdr->h_proto) < 1536) { + if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) { int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data); - /* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */ + /* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */ if(framelen != ntohs(hdr->h_proto)) { return -1; } @@@ -320,7 -320,7 +320,7 @@@ static int handle_ule_extensions( struc (int) p->ule_sndu_type, l, total_ext_len); #endif
- } while (p->ule_sndu_type < 1536); + } while (p->ule_sndu_type < ETH_P_802_3_MIN);
return total_ext_len; } @@@ -712,7 -712,7 +712,7 @@@ static void dvb_net_ule( struct net_dev }
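All of the 1536 -> ETH_P_802_3_MIN substitutions in dvb_net.c encode the same IEEE 802.3 rule: an h_proto value of 0x0600 (1536) or greater is an EtherType, anything smaller is an 802.3 length field. A minimal sketch of the check the named constant now makes explicit:

#include <linux/kernel.h>
#include <linux/if_ether.h>

/* True if h_proto carries an EtherType rather than an IEEE 802.3
 * length field (ETH_P_802_3_MIN == 0x0600 == 1536). */
static bool is_ethertype(__be16 h_proto)
{
	return ntohs(h_proto) >= ETH_P_802_3_MIN;
}
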
/* Handle ULE Extension Headers. */ - if (priv->ule_sndu_type < 1536) { + if (priv->ule_sndu_type < ETH_P_802_3_MIN) { /* There is an extension header. Handle it accordingly. */ int l = handle_ule_extensions(priv); if (l < 0) { @@@ -1044,7 -1044,7 +1044,7 @@@ static int dvb_net_feed_start(struct ne ret = priv->tsfeed->set(priv->tsfeed, priv->pid, /* pid */ TS_PACKET, /* type */ - DMX_TS_PES_OTHER, /* pes type */ + DMX_PES_OTHER, /* pes type */ 32768, /* circular buffer size */ timeout /* timeout */ ); diff --combined drivers/net/bonding/bond_main.c index dbbea0e,2aac890..dc35c24 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -796,9 -796,8 +796,8 @@@ static void bond_resend_igmp_join_reque { struct bonding *bond = container_of(work, struct bonding, mcast_work.work); - rcu_read_lock(); + bond_resend_igmp_join_requests(bond); - rcu_read_unlock(); }
/* @@@ -846,10 -845,8 +845,10 @@@ static void bond_mc_swap(struct bondin if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1);
+ netif_addr_lock_bh(bond->dev); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_del(old_active->dev, ha->addr); + netif_addr_unlock_bh(bond->dev); }
if (new_active) { @@@ -860,10 -857,8 +859,10 @@@ if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1);
+ netif_addr_lock_bh(bond->dev); netdev_for_each_mc_addr(ha, bond->dev) dev_mc_add(new_active->dev, ha->addr); + netif_addr_unlock_bh(bond->dev); } }
@@@ -1905,29 -1900,11 +1904,29 @@@ err_dest_symlinks bond_destroy_slave_symlinks(bond_dev, slave_dev);
err_detach: + if (!USES_PRIMARY(bond->params.mode)) { + netif_addr_lock_bh(bond_dev); + bond_mc_list_flush(bond_dev, slave_dev); + netif_addr_unlock_bh(bond_dev); + } + bond_del_vlans_from_slave(bond, slave_dev); write_lock_bh(&bond->lock); bond_detach_slave(bond, new_slave); + if (bond->primary_slave == new_slave) + bond->primary_slave = NULL; write_unlock_bh(&bond->lock); + if (bond->curr_active_slave == new_slave) { + read_lock(&bond->lock); + write_lock_bh(&bond->curr_slave_lock); + bond_change_active_slave(bond, NULL); + bond_select_active_slave(bond); + write_unlock_bh(&bond->curr_slave_lock); + read_unlock(&bond->lock); + } + slave_disable_netpoll(new_slave);
err_close: + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev);
err_unset_master: @@@ -3190,20 -3167,11 +3189,20 @@@ static int bond_slave_netdev_event(unsi struct net_device *slave_dev) { struct slave *slave = bond_slave_get_rtnl(slave_dev); - struct bonding *bond = slave->bond; - struct net_device *bond_dev = slave->bond->dev; + struct bonding *bond; + struct net_device *bond_dev; u32 old_speed; u8 old_duplex;
+ /* A netdev event can be generated while enslaving a device + * before netdev_rx_handler_register is called in which case + * slave will be NULL + */ + if (!slave) + return NOTIFY_DONE; + bond_dev = slave->bond->dev; + bond = slave->bond; + switch (event) { case NETDEV_UNREGISTER: if (bond->setup_by_slave) @@@ -3317,22 -3285,20 +3316,22 @@@ static int bond_xmit_hash_policy_l2(str */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { - struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph; - struct ipv6hdr *ipv6h; + const struct ethhdr *data; + const struct iphdr *iph; + const struct ipv6hdr *ipv6h; u32 v6hash; - __be32 *s, *d; + const __be32 *s, *d;
if (skb->protocol == htons(ETH_P_IP) && - skb_network_header_len(skb) >= sizeof(*iph)) { + pskb_network_may_pull(skb, sizeof(*iph))) { iph = ip_hdr(skb); + data = (struct ethhdr *)skb->data; return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - skb_network_header_len(skb) >= sizeof(*ipv6h)) { + pskb_network_may_pull(skb, sizeof(*ipv6h))) { ipv6h = ipv6_hdr(skb); + data = (struct ethhdr *)skb->data; s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); @@@ -3351,36 -3317,33 +3350,36 @@@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { u32 layer4_xor = 0; - struct iphdr *iph; - struct ipv6hdr *ipv6h; - __be32 *s, *d; - __be16 *layer4hdr; + const struct iphdr *iph; + const struct ipv6hdr *ipv6h; + const __be32 *s, *d; + const __be16 *l4 = NULL; + __be16 _l4[2]; + int noff = skb_network_offset(skb); + int poff;
if (skb->protocol == htons(ETH_P_IP) && - skb_network_header_len(skb) >= sizeof(*iph)) { + pskb_may_pull(skb, noff + sizeof(*iph))) { iph = ip_hdr(skb); - if (!ip_is_fragment(iph) && - (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) && - (skb_headlen(skb) - skb_network_offset(skb) >= - iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { - layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + poff = proto_ports_offset(iph->protocol); + + if (!ip_is_fragment(iph) && poff >= 0) { + l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, + sizeof(_l4), &_l4); + if (l4) + layer4_xor = ntohs(l4[0] ^ l4[1]); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - skb_network_header_len(skb) >= sizeof(*ipv6h)) { + pskb_may_pull(skb, noff + sizeof(*ipv6h))) { ipv6h = ipv6_hdr(skb); - if ((ipv6h->nexthdr == IPPROTO_TCP || - ipv6h->nexthdr == IPPROTO_UDP) && - (skb_headlen(skb) - skb_network_offset(skb) >= - sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { - layer4hdr = (__be16 *)(ipv6h + 1); - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + poff = proto_ports_offset(ipv6h->nexthdr); + if (poff >= 0) { + l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, + sizeof(_l4), &_l4); + if (l4) + layer4_xor = ntohs(l4[0] ^ l4[1]); } s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; @@@ -4882,18 -4845,9 +4881,18 @@@ static int __net_init bond_net_init(str static void __net_exit bond_net_exit(struct net *net) { struct bond_net *bn = net_generic(net, bond_net_id); + struct bonding *bond, *tmp_bond; + LIST_HEAD(list);
bond_destroy_sysfs(bn); bond_destroy_proc_dir(bn); + + /* Kill off any bonds created after unregistering bond rtnl ops */ + rtnl_lock(); + list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) + unregister_netdevice_queue(bond->dev, &list); + unregister_netdevice_many(&list); + rtnl_unlock(); }
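The bond_xmit_hash_policy_l34() rewrite above replaces direct skb->data arithmetic with skb_header_pointer(), which is safe on non-linear skbs: it copies the requested bytes into a caller-supplied buffer when they sit in paged fragments, and returns NULL when the packet is too short. A minimal sketch of the pattern, with an illustrative l4_offset parameter:

#include <linux/skbuff.h>

/* XOR the two 16-bit L4 ports found at l4_offset, tolerating
 * non-linear skbs; 0 means the packet was too short. */
static u32 l4_port_xor(const struct sk_buff *skb, int l4_offset)
{
	const __be16 *l4;
	__be16 _l4[2];	/* stack copy used when the bytes are paged */

	l4 = skb_header_pointer(skb, l4_offset, sizeof(_l4), &_l4);
	return l4 ? ntohs(l4[0] ^ l4[1]) : 0;
}
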
static struct pernet_operations bond_net_ops = { @@@ -4947,8 -4901,8 +4946,8 @@@ static void __exit bonding_exit(void
bond_destroy_debugfs();
- unregister_pernet_subsys(&bond_net_ops); rtnl_link_unregister(&bond_link_ops); + unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER /* diff --combined drivers/net/can/mcp251x.c index 9aa0c64,55033dd5..8cda23b --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@@ -269,7 -269,7 +269,7 @@@ struct mcp251x_priv #define MCP251X_IS(_model) \ static inline int mcp251x_is_##_model(struct spi_device *spi) \ { \ - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \ + struct mcp251x_priv *priv = spi_get_drvdata(spi); \ return priv->model == CAN_MCP251X_MCP##_model; \ }
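The dev_get_drvdata(&spi->dev) -> spi_get_drvdata(spi) conversions running through this file are purely mechanical: the SPI helpers are thin inline wrappers over the generic device drvdata, essentially as defined in <linux/spi/spi.h>:

static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
	dev_set_drvdata(&spi->dev, data);
}

static inline void *spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}
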
@@@ -305,7 -305,7 +305,7 @@@ static void mcp251x_clean(struct net_de */ static int mcp251x_spi_trans(struct spi_device *spi, int len) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); struct spi_transfer t = { .tx_buf = priv->spi_tx_buf, .rx_buf = priv->spi_rx_buf, @@@ -333,7 -333,7 +333,7 @@@
static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); u8 val = 0;
priv->spi_tx_buf[0] = INSTRUCTION_READ; @@@ -348,7 -348,7 +348,7 @@@ static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg, uint8_t *v1, uint8_t *v2) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi);
priv->spi_tx_buf[0] = INSTRUCTION_READ; priv->spi_tx_buf[1] = reg; @@@ -361,7 -361,7 +361,7 @@@
static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi);
priv->spi_tx_buf[0] = INSTRUCTION_WRITE; priv->spi_tx_buf[1] = reg; @@@ -373,7 -373,7 +373,7 @@@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg, u8 mask, uint8_t val) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi);
priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY; priv->spi_tx_buf[1] = reg; @@@ -386,7 -386,7 +386,7 @@@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, int len, int tx_buf_idx) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi);
if (mcp251x_is_2510(spi)) { int i; @@@ -403,7 -403,7 +403,7 @@@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, int tx_buf_idx) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); u32 sid, eid, exide, rtr; u8 buf[SPI_TRANSFER_BUF_LEN];
@@@ -434,7 -434,7 +434,7 @@@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, int buf_idx) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi);
if (mcp251x_is_2510(spi)) { int i, len; @@@ -454,7 -454,7 +454,7 @@@
static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); struct sk_buff *skb; struct can_frame *frame; u8 buf[SPI_TRANSFER_BUF_LEN]; @@@ -550,7 -550,7 +550,7 @@@ static int mcp251x_do_set_mode(struct n
static int mcp251x_set_normal_mode(struct spi_device *spi) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); unsigned long timeout;
/* Enable interrupts */ @@@ -620,7 -620,7 +620,7 @@@ static int mcp251x_setup(struct net_dev
static int mcp251x_hw_reset(struct spi_device *spi) { - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); int ret; unsigned long timeout;
@@@ -929,7 -929,6 +929,7 @@@ static int mcp251x_open(struct net_devi struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; struct mcp251x_platform_data *pdata = spi->dev.platform_data; + unsigned long flags; int ret;
ret = open_candev(net); @@@ -946,14 -945,9 +946,14 @@@ priv->tx_skb = NULL; priv->tx_len = 0;
+ flags = IRQF_ONESHOT; + if (pdata->irq_flags) + flags |= pdata->irq_flags; + else + flags |= IRQF_TRIGGER_FALLING; + ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, - pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, - DEVICE_NAME, priv); + flags, DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); if (pdata->transceiver_enable) @@@ -1026,7 -1020,7 +1026,7 @@@ static int mcp251x_can_probe(struct spi CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; - dev_set_drvdata(&spi->dev, priv); + spi_set_drvdata(spi, priv);
priv->spi = spi; mutex_init(&priv->mcp_lock); @@@ -1124,7 -1118,7 +1124,7 @@@ error_out static int mcp251x_can_remove(struct spi_device *spi) { struct mcp251x_platform_data *pdata = spi->dev.platform_data; - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net;
unregister_candev(net); @@@ -1144,11 -1138,13 +1144,13 @@@ return 0; }
- #ifdef CONFIG_PM - static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state) + #ifdef CONFIG_PM_SLEEP + + static int mcp251x_can_suspend(struct device *dev) { + struct spi_device *spi = to_spi_device(dev); struct mcp251x_platform_data *pdata = spi->dev.platform_data; - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net;
priv->force_quit = 1; @@@ -1176,10 -1172,11 +1178,11 @@@ return 0; }
- static int mcp251x_can_resume(struct spi_device *spi) + static int mcp251x_can_resume(struct device *dev) { + struct spi_device *spi = to_spi_device(dev); struct mcp251x_platform_data *pdata = spi->dev.platform_data; - struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + struct mcp251x_priv *priv = spi_get_drvdata(spi);
if (priv->after_suspend & AFTER_SUSPEND_POWER) { pdata->power_enable(1); @@@ -1197,11 -1194,11 +1200,11 @@@ enable_irq(spi->irq); return 0; } - #else - #define mcp251x_can_suspend NULL - #define mcp251x_can_resume NULL #endif
+ static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend, + mcp251x_can_resume); + static const struct spi_device_id mcp251x_id_table[] = { { "mcp2510", CAN_MCP251X_MCP2510 }, { "mcp2515", CAN_MCP251X_MCP2515 }, @@@ -1213,29 -1210,15 +1216,15 @@@ MODULE_DEVICE_TABLE(spi, mcp251x_id_tab static struct spi_driver mcp251x_can_driver = { .driver = { .name = DEVICE_NAME, - .bus = &spi_bus_type, .owner = THIS_MODULE, + .pm = &mcp251x_can_pm_ops, },
.id_table = mcp251x_id_table, .probe = mcp251x_can_probe, .remove = mcp251x_can_remove, - .suspend = mcp251x_can_suspend, - .resume = mcp251x_can_resume, }; - - static int __init mcp251x_can_init(void) - { - return spi_register_driver(&mcp251x_can_driver); - } - - static void __exit mcp251x_can_exit(void) - { - spi_unregister_driver(&mcp251x_can_driver); - } - - module_init(mcp251x_can_init); - module_exit(mcp251x_can_exit); + module_spi_driver(mcp251x_can_driver);
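The tail of the mcp251x diff is a standard modernization: the legacy spi_driver .suspend/.resume hooks become dev_pm_ops under CONFIG_PM_SLEEP, the hand-rolled init/exit pair collapses into module_spi_driver(), and the IRQ request above gains IRQF_ONESHOT, which the core requires when request_threaded_irq() is called with a NULL primary handler. A skeletal sketch of the resulting driver shape — every example_* name is hypothetical:

static int example_probe(struct spi_device *spi)
{
	/* allocate state, spi_set_drvdata(spi, priv), ... */
	return 0;
}

static int example_remove(struct spi_device *spi)
{
	return 0;
}

static int example_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* note the struct device argument: quiesce the chip via 'spi' */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* ... and bring it back up */
	return 0;
}

/* Expands to a populated dev_pm_ops only under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct spi_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
module_spi_driver(example_driver);	/* replaces module_init/module_exit */
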
MODULE_AUTHOR("Chris Elston celston@katalix.com, " "Christian Pellegrin chripell@evolware.org"); diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 57619dd,352e58e..142df1c --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -451,7 -451,8 +451,8 @@@ static void bnx2x_tpa_start(struct bnx2 * Compute number of aggregated segments, and gso_type. */ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, - u16 len_on_bd, unsigned int pkt_len) + u16 len_on_bd, unsigned int pkt_len, + u16 num_of_coalesced_segs) { /* TPA aggregation won't have either IP options or TCP options * other than timestamp or IPv6 extension headers. @@@ -480,8 -481,7 +481,7 @@@ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count * to skb_shinfo(skb)->gso_segs */ - NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len, - skb_shinfo(skb)->gso_size); + NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; }
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, @@@ -537,7 -537,8 +537,8 @@@ static int bnx2x_fill_frag_skb(struct b /* This is needed in order to enable forwarding support */ if (frag_size) bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, - le16_to_cpu(cqe->pkt_len)); + le16_to_cpu(cqe->pkt_len), + le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { @@@ -2009,7 -2010,7 +2010,7 @@@ static int bnx2x_init_hw(struct bnx2x * * Cleans the object that have internal lists without sending * ramrods. Should be run when interrutps are disabled. */ - static void bnx2x_squeeze_objects(struct bnx2x *bp) + void bnx2x_squeeze_objects(struct bnx2x *bp) { int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; @@@ -2614,9 -2615,6 +2615,9 @@@ int bnx2x_nic_load(struct bnx2x *bp, in } }
+ /* initialize FW coalescing state machines in RAM */ + bnx2x_update_coalesce(bp); + /* setup the leading queue */ rc = bnx2x_setup_leading(bp); if (rc) { @@@ -2777,7 -2775,7 +2778,7 @@@ load_error0 #endif /* ! BNX2X_STOP_ON_ERROR */ }
- static int bnx2x_drain_tx_queues(struct bnx2x *bp) + int bnx2x_drain_tx_queues(struct bnx2x *bp) { u8 rc = 0, cos, i;
@@@ -3089,11 -3087,11 +3090,11 @@@ int bnx2x_poll(struct napi_struct *napi * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs */ - static noinline u16 bnx2x_tx_split(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, - struct sw_tx_bd *tx_buf, - struct eth_tx_start_bd **tx_bd, u16 hlen, - u16 bd_prod, int nbd) + static u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod) { struct eth_tx_start_bd *h_tx_bd = *tx_bd; struct eth_tx_bd *d_tx_bd; @@@ -3101,11 -3099,10 +3102,10 @@@ int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */ - h_tx_bd->nbd = cpu_to_le16(nbd); h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n", - h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd); + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", + h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD * (after the pbd) and fill it */ @@@ -3134,7 -3131,7 +3134,7 @@@
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) - static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) + static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) { __sum16 tsum = (__force __sum16) csum;
@@@ -3149,30 -3146,47 +3149,47 @@@ return bswab16(tsum); }
- static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) + static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) { u32 rc; + __u8 prot = 0; + __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL) - rc = XMIT_PLAIN; + return XMIT_PLAIN;
- else { - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { - rc = XMIT_CSUM_V6; - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + prot = ipv6_hdr(skb)->nexthdr; + } else { + rc = XMIT_CSUM_V4; + prot = ip_hdr(skb)->protocol; + }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) { + if (inner_ip_hdr(skb)->version == 6) { + rc |= XMIT_CSUM_ENC_V6; + if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; } else { - rc = XMIT_CSUM_V4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_ENC_V4; + if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) rc |= XMIT_CSUM_TCP; } } + if (prot == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb)) - rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; - else if (skb_is_gso(skb)) - rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; + if (skb_is_gso_v6(skb)) { + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V6; + } else if (skb_is_gso(skb)) { + rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V4; + }
return rc; } @@@ -3257,14 -3271,23 +3274,23 @@@ exit_lbl } #endif
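The new encapsulation branches in bnx2x_xmit_type() rely on the inner_*() skb accessors: once skb->encapsulation is set, inner_ip_hdr()/inner_ipv6_hdr() and skb_inner_transport_header() address the tunneled headers while the plain accessors keep pointing at the outer ones. A hedged sketch of the discrimination the driver performs:

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>

/* Is the innermost L4 protocol TCP?  Assumes the caller has already
 * checked skb->encapsulation, as bnx2x_xmit_type() does. */
static bool inner_l4_is_tcp(const struct sk_buff *skb)
{
	if (inner_ip_hdr(skb)->version == 4)
		return inner_ip_hdr(skb)->protocol == IPPROTO_TCP;

	return inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
}
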
- static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) + static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) { + struct ipv6hdr *ipv6; + *parsing_data |= (skb_shinfo(skb)->gso_size << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS; - if ((xmit_type & XMIT_GSO_V6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else if (xmit_type & XMIT_GSO_V6) + ipv6 = ipv6_hdr(skb); + else + ipv6 = NULL; + + if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6) *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; }
@@@ -3275,13 -3298,13 +3301,13 @@@ * @pbd: parse BD * @xmit_type: xmit flags */ - static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) + static void bnx2x_set_pbd_gso(struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); - pbd->tcp_flags = pbd_tcp_flags(skb); + pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) { pbd->ip_id = bswab16(ip_hdr(skb)->id); @@@ -3301,6 -3324,40 +3327,40 @@@ }
/** + * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712/578xx related, when skb has encapsulation + */ + static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) + { + *parsing_data |= + ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data; + } + + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. + */ + return skb_inner_transport_header(skb) + + sizeof(struct udphdr) - skb->data; + } + + /** * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length * * @bp: driver handle @@@ -3308,15 -3365,15 +3368,15 @@@ * @parsing_data: data to be updated * @xmit_type: xmit flags * - * 57712 related + * 57712/578xx related */ - static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, - u32 *parsing_data, u32 xmit_type) + static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) { *parsing_data |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) { *parsing_data |= ((tcp_hdrlen(skb) / 4) << @@@ -3331,17 -3388,15 +3391,15 @@@ return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; }
- static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) + /* set FW indication according to inner or outer protocols if tunneled */ + static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, + u32 xmit_type) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; + if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP)) tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; @@@ -3355,9 -3410,9 +3413,9 @@@ * @pbd: parse BD to be updated * @xmit_type: xmit flags */ - static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) + static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) { u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
@@@ -3403,6 -3458,70 +3461,70 @@@ return hlen; }
+ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, + struct eth_tx_parse_bd_e2 *pbd_e2, + struct eth_tx_parse_2nd_bd *pbd2, + u16 *global_data, + u32 xmit_type) + { + u16 hlen_w = 0; + u8 outerip_off, outerip_len = 0; + /* from outer IP to transport */ + hlen_w = (skb_inner_transport_header(skb) - + skb_network_header(skb)) >> 1; + + /* transport len */ + if (xmit_type & XMIT_CSUM_TCP) + hlen_w += inner_tcp_hdrlen(skb) >> 1; + else + hlen_w += sizeof(struct udphdr) >> 1; + + pbd2->fw_ip_hdr_to_payload_w = hlen_w; + + if (xmit_type & XMIT_CSUM_ENC_V4) { + struct iphdr *iph = ip_hdr(skb); + pbd2->fw_ip_csum_wo_len_flags_frag = + bswab16(csum_fold((~iph->check) - + iph->tot_len - iph->frag_off)); + } else { + pbd2->fw_ip_hdr_to_payload_w = + hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + } + + pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); + + pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); + + if (xmit_type & XMIT_GSO_V4) { + pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); + + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_tcpudp_magic( + inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + outerip_len = ip_hdr(skb)->ihl << 1; + } else { + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_ipv6_magic( + &inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } + + outerip_off = (skb_network_header(skb) - skb->data) >> 1; + + *global_data |= + outerip_off | + (!!(xmit_type & XMIT_CSUM_V6) << + ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) | + (outerip_len << + ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); + } + /* called with netif_tx_lock * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * netif_wake_queue() @@@ -3418,6 -3537,7 +3540,7 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_ struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + struct eth_tx_parse_2nd_bd *pbd2 = NULL; u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; int nbd, txq_index; @@@ -3485,7 -3605,7 +3608,7 @@@ mac_type = MULTICAST_ADDRESS; }
- #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) + #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) /* First, check if we need to linearize the skb (due to FW restrictions). No need to check fragmentation if page size > 8K (there will be no violation to FW restrictions) */ @@@ -3533,12 -3653,9 +3656,9 @@@ first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - SET_FLAG(tx_start_bd->general_data, - ETH_TX_START_BD_PARSE_NBDS, - 0);
- /* header nbd */ - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); + /* header nbd: indirectly zero other flags! */ + tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */ tx_buf->first_bd = txdata->tx_bd_prod; @@@ -3558,19 -3675,16 +3678,16 @@@ /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ - #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) { - #endif + if (IS_VF(bp)) tx_start_bd->vlan_or_ethertype = cpu_to_le16(ntohs(eth->h_proto)); - #ifndef BNX2X_STOP_ON_ERROR - } else { + else /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); - } - #endif }
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ + /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@@ -3580,23 -3694,58 +3697,58 @@@ if (!CHIP_IS_E1x(bp)) { pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); - /* Set PBD in checksum offload case */ - if (xmit_type & XMIT_CSUM) + + if (xmit_type & XMIT_CSUM_ENC) { + u16 global_data = 0; + + /* Set PBD in enc checksum offload case */ + hlen = bnx2x_set_pbd_csum_enc(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + + /* turn on 2nd parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; + + memset(pbd2, 0, sizeof(*pbd2)); + + pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = + (skb_inner_network_header(skb) - + skb->data) >> 1; + + if (xmit_type & XMIT_GSO_ENC) + bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2, + &global_data, + xmit_type); + + pbd2->global_data = cpu_to_le16(global_data); + + /* add addition parse BD indication to start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_PARSE_NBDS, 1); + /* set encapsulation flag in start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_TUNNEL_EXIST, 1); + nbd++; + } else if (xmit_type & XMIT_CSUM) { + /* Set PBD in checksum offload case w/o encapsulation */ hlen = bnx2x_set_pbd_csum_e2(bp, skb, &pbd_e2_parsing_data, xmit_type); + }
- if (IS_MF_SI(bp) || IS_VF(bp)) { - /* fill in the MAC addresses in the PBD - for local - * switching - */ - bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, - &pbd_e2->src_mac_addr_mid, - &pbd_e2->src_mac_addr_lo, + /* Add the macs to the parsing BD this is a vf */ + if (IS_VF(bp)) { + /* override GRE parameters in BD */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, eth->h_source); - bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, - &pbd_e2->dst_mac_addr_mid, - &pbd_e2->dst_mac_addr_lo, + + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, eth->h_dest); }
@@@ -3618,14 -3767,13 +3770,13 @@@ /* Setup the data pointer of the first BD of the packet */ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED, - "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n", + "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n", tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, - le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), + le16_to_cpu(tx_start_bd->nbytes), tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@@ -3638,10 -3786,12 +3789,12 @@@
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen)) + if (unlikely(skb_headlen(skb) > hlen)) { + nbd++; bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, &tx_start_bd, hlen, - bd_prod, ++nbd); + bd_prod); + } if (!CHIP_IS_E1x(bp)) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); @@@ -3731,9 -3881,13 +3884,13 @@@ if (pbd_e2) DP(NETIF_MSG_TX_QUEUED, "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", - pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, - pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, - pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, + pbd_e2, + pbd_e2->data.mac_addr.dst_hi, + pbd_e2->data.mac_addr.dst_mid, + pbd_e2->data.mac_addr.dst_lo, + pbd_e2->data.mac_addr.src_hi, + pbd_e2->data.mac_addr.src_mid, + pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
@@@ -4583,11 -4737,11 +4740,11 @@@ static void storm_memset_hc_disable(str u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); - u16 flags = REG_RD16(bp, addr); + u8 flags = REG_RD8(bp, addr); /* clear and set */ flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; - REG_WR16(bp, addr, flags); + REG_WR8(bp, addr, flags); DP(NETIF_MSG_IFUP, "port %x fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c50696b,fdfe33b..4ce0fd2 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -75,8 -75,6 +75,6 @@@ #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
- #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) - /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ)
@@@ -2955,14 -2953,16 +2953,16 @@@ static unsigned long bnx2x_get_common_f __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the - * parent connection). The statistics are zeroed when the parent - * connection is initialized. + * parent connection). The statistics are zeroed when the parent + * connection is initialized. */
__set_bit(BNX2X_Q_FLG_STATS, &flags); if (zero_stats) __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); + __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
#ifdef BNX2X_STOP_ON_ERROR __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); @@@ -3227,16 -3227,29 +3227,29 @@@ static void bnx2x_drv_info_ether_stat(s { struct eth_stats_info *ether_stat = &bp->slowpath->drv_info_to_mcp.ether_stat; + struct bnx2x_vlan_mac_obj *mac_obj = + &bp->sp_objs->mac_obj; + int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION, ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, - DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, - ether_stat->mac_local); - + /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the + * mac_local field in ether_stat struct. The base address is offset by 2 + * bytes to account for the field being 8 bytes but a mac address is + * only 6 bytes. Likewise, the stride for the get_n_elements function is + * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes + * allocated by the ether_stat struct, so the macs will land in their + * proper positions. + */ + for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) + memset(ether_stat->mac_local + i, 0, + sizeof(ether_stat->mac_local[0])); + mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local + MAC_PAD, MAC_PAD, + ETH_ALEN); ether_stat->mtu_size = bp->dev->mtu; - if (bp->dev->features & NETIF_F_RXCSUM) ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (bp->dev->features & NETIF_F_TSO) @@@ -3258,8 -3271,7 +3271,7 @@@ static void bnx2x_drv_info_fcoe_stat(st if (!CNIC_LOADED(bp)) return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, - bp->fip_mac, ETH_ALEN); + memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority = app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; @@@ -3361,8 -3373,8 +3373,8 @@@ static void bnx2x_drv_info_iscsi_stat(s if (!CNIC_LOADED(bp)) return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, - bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); + memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, + ETH_ALEN);
iscsi_stat->qos_priority = app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; @@@ -4947,7 -4959,7 +4959,7 @@@ static void bnx2x_after_function_update q); }
- if (!NO_FCOE(bp)) { + if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { fp = &bp->fp[FCOE_IDX(bp)]; queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@@ -6029,9 -6041,10 +6041,10 @@@ void bnx2x_nic_init(struct bnx2x *bp, u rmb(); bnx2x_init_rx_rings(bp); bnx2x_init_tx_rings(bp); - - if (IS_VF(bp)) + if (IS_VF(bp)) { + bnx2x_memset_stats(bp); return; + }
/* Initialize MOD_ABS interrupts */ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, @@@ -9525,6 -9538,10 +9538,10 @@@ sp_rtnl_not_reset bnx2x_vfpf_storm_rx_mode(bp); }
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, + &bp->sp_rtnl_state)) + bnx2x_pf_set_vfs_vlan(bp); + /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) */ @@@ -9532,8 -9549,10 +9549,10 @@@
/* enable SR-IOV if applicable */ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, - &bp->sp_rtnl_state)) + &bp->sp_rtnl_state)) { + bnx2x_disable_sriov(bp); bnx2x_enable_sriov(bp); + } }
static void bnx2x_period_task(struct work_struct *work) @@@ -9701,6 -9720,31 +9720,31 @@@ static struct bnx2x_prev_path_list return NULL; }
+ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) + { + struct bnx2x_prev_path_list *tmp_list; + int rc; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + tmp_list->aer = 1; + rc = 0; + } else { + BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", + BP_PATH(bp)); + } + + up(&bnx2x_prev_sem); + + return rc; + } + static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) { struct bnx2x_prev_path_list *tmp_list; @@@ -9709,14 -9753,15 +9753,15 @@@ if (down_trylock(&bnx2x_prev_sem)) return false;
- list_for_each_entry(tmp_list, &bnx2x_prev_list, list) { - if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && - bp->pdev->bus->number == tmp_list->bus && - BP_PATH(bp) == tmp_list->path) { + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (tmp_list->aer) { + DP(NETIF_MSG_HW, "Path %d was marked by AER\n", + BP_PATH(bp)); + } else { rc = true; BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", BP_PATH(bp)); - break; } }
@@@ -9730,6 -9775,28 +9775,28 @@@ static int bnx2x_prev_mark_path(struct struct bnx2x_prev_path_list *tmp_list; int rc;
+ rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + /* Check whether the entry for this path already exists */ + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (!tmp_list->aer) { + BNX2X_ERR("Re-Marking the path.\n"); + } else { + DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", + BP_PATH(bp)); + tmp_list->aer = 0; + } + up(&bnx2x_prev_sem); + return 0; + } + up(&bnx2x_prev_sem); + + /* Create an entry for this path and add it */ tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); if (!tmp_list) { BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); @@@ -9739,6 -9806,7 +9806,7 @@@ tmp_list->bus = bp->pdev->bus->number; tmp_list->slot = PCI_SLOT(bp->pdev->devfn); tmp_list->path = BP_PATH(bp); + tmp_list->aer = 0; tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem); @@@ -9746,8 -9814,8 +9814,8 @@@ BNX2X_ERR("Received %d when tried to take lock\n", rc); kfree(tmp_list); } else { - BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n", - BP_PATH(bp)); + DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", + BP_PATH(bp)); list_add(&tmp_list->list, &bnx2x_prev_list); up(&bnx2x_prev_sem); } @@@ -9878,10 -9946,6 +9946,10 @@@ static int bnx2x_prev_unload_common(str REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); } } + if (!CHIP_IS_E1x(bp)) + /* block FW from writing to host */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + /* wait until BRB is empty */ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { @@@ -9990,6 -10054,7 +10058,7 @@@ static int bnx2x_prev_unload(struct bnx }
do { + int aer = 0; /* Lock MCP using an unload request */ fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); if (!fw) { @@@ -9998,7 -10063,18 +10067,18 @@@ break; }
- if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", + rc); + } else { + /* If Path is marked by EEH, ignore unload status */ + aer = !!(bnx2x_prev_path_get_entry(bp) && + bnx2x_prev_path_get_entry(bp)->aer); + up(&bnx2x_prev_sem); + } + + if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { rc = bnx2x_prev_unload_common(bp); break; } @@@ -10038,8 -10114,12 +10118,12 @@@ static void bnx2x_get_common_hwinfo(str id = ((val & 0xffff) << 16); val = REG_RD(bp, MISC_REG_CHIP_REV); id |= ((val & 0xf) << 12); - val = REG_RD(bp, MISC_REG_CHIP_METAL); - id |= ((val & 0xff) << 4); + + /* Metal is read from PCI regs, but we can't access >=0x400 from + * the configuration space (so we need to reg_rd) + */ + val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); + id |= (((val >> 24) & 0xf) << 4); val = REG_RD(bp, MISC_REG_BOND_ID); id |= (val & 0xf); bp->common.chip_id = id; @@@ -10816,14 -10896,12 +10900,12 @@@ static void bnx2x_get_cnic_mac_hwinfo(s } }
- if (IS_MF_STORAGE_SD(bp)) - /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); - - if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp)) - /* use FIP MAC as primary MAC */ + /* If this is a storage-only interface, use SAN mac as + * primary MAC. Notice that for SD this is already the case, + * as the SAN mac was copied from the primary MAC. + */ + if (IS_MF_FCOE_AFEX(bp)) memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); - } else { val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. iscsi_mac_upper); @@@ -11060,6 -11138,9 +11142,9 @@@ static int bnx2x_get_hwinfo(struct bnx2 } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: + bp->mf_config[vn] = 0; + break; default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; @@@ -11406,26 -11487,6 +11491,6 @@@ static int bnx2x_init_bp(struct bnx2x * * net_device service functions */
- static int bnx2x_open_epilog(struct bnx2x *bp) - { - /* Enable sriov via delayed work. This must be done via delayed work - * because it causes the probe of the vf devices to be run, which invoke - * register_netdevice which must have rtnl lock taken. As we are holding - * the lock right now, that could only work if the probe would not take - * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm failes) it can't assume - * the lock is being held for it. Using delayed work here allows the - * probe code to simply take the lock (i.e. wait for it to be released - * if it is being held). - */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - - return 0; - } - /* called with rtnl_lock */ static int bnx2x_open(struct net_device *dev) { @@@ -11795,6 -11856,8 +11860,8 @@@ static const struct net_device_ops bnx2 .ndo_setup_tc = bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_get_vf_config = bnx2x_get_vf_config, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, @@@ -11957,7 -12020,7 +12024,7 @@@ static int bnx2x_init_dev(struct bnx2x dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops; - bnx2x_set_ethtool_ops(dev); + bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
@@@ -11965,6 -12028,13 +12032,13 @@@ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; + if (!CHIP_IS_E1x(bp)) { + dev->hw_features |= NETIF_F_GSO_GRE; + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE; + }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; @@@ -12451,7 -12521,7 +12525,7 @@@ static int bnx2x_init_one(struct pci_de * l2 connections. */ if (IS_VF(bp)) { - bnx2x_vf_map_doorbells(bp); + bp->doorbells = bnx2x_vf_doorbells(bp); rc = bnx2x_vf_pci_alloc(bp); if (rc) goto init_one_exit; @@@ -12479,13 -12549,8 +12553,8 @@@ goto init_one_exit; }
- /* Enable SRIOV if capability found in configuration space. - * Once the generic SR-IOV framework makes it in from the - * pci tree this will be revised, to allow dynamic control - * over the number of VFs. Right now, change the num of vfs - * param below to enable SR-IOV. - */ - rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/); + /* Enable SRIOV if capability found in configuration space */ + rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); if (rc) goto init_one_exit;
@@@ -12497,16 -12562,6 +12566,6 @@@ if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */ - switch (ent->driver_data) { - case BCM57840_O: - case BCM57840_4_10: - case BCM57840_2_20: - case BCM57840_MFO: - case BCM57840_MF: - bp->flags |= NO_FCOE_FLAG; - } - /* Set bp->num_queues for MSI-X mode*/ bnx2x_set_num_queues(bp);
@@@ -12640,9 -12695,7 +12699,7 @@@ static void bnx2x_remove_one(struct pci
static int bnx2x_eeh_nic_unload(struct bnx2x *bp) { - int i; - - bp->state = BNX2X_STATE_ERROR; + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
@@@ -12651,29 -12704,21 +12708,21 @@@
/* Stop Tx */ bnx2x_tx_disable(bp); - - bnx2x_netif_stop(bp, 0); /* Delete all NAPI objects */ bnx2x_del_all_napi(bp); if (CNIC_LOADED(bp)) bnx2x_del_all_napi_cnic(bp); + netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer); + cancel_delayed_work(&bp->sp_task); + cancel_delayed_work(&bp->period_task);
- bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* Release IRQs */ - bnx2x_free_irq(bp); - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - - for_each_rx_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - - bnx2x_free_mem(bp); + spin_lock_bh(&bp->stats_lock); + bp->stats_state = STATS_STATE_DISABLED; + spin_unlock_bh(&bp->stats_lock);
- bp->state = BNX2X_STATE_CLOSED; + bnx2x_save_statistics(bp);
netif_carrier_off(bp->dev);
@@@ -12709,6 -12754,8 +12758,8 @@@ static pci_ers_result_t bnx2x_io_error_
rtnl_lock();
+ BNX2X_ERR("IO error detected\n"); + netif_device_detach(dev);
if (state == pci_channel_io_perm_failure) { @@@ -12719,6 -12766,8 +12770,8 @@@ if (netif_running(dev)) bnx2x_eeh_nic_unload(bp);
+ bnx2x_prev_path_mark_eeh(bp); + pci_disable_device(pdev);
rtnl_unlock(); @@@ -12737,9 -12786,10 +12790,10 @@@ static pci_ers_result_t bnx2x_io_slot_r { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp = netdev_priv(dev); + int i;
rtnl_lock(); - + BNX2X_ERR("IO slot reset initializing...\n"); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset\n"); @@@ -12753,6 -12803,42 +12807,42 @@@ if (netif_running(dev)) bnx2x_set_power_state(bp, PCI_D0);
+ if (netif_running(dev)) { + BNX2X_ERR("IO slot reset --> driver unload\n"); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + u32 v; + + v = SHMEM2_RD(bp, + drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + bnx2x_drain_tx_queues(bp); + bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); + bnx2x_netif_stop(bp, 1); + bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp, true); + + bp->sp_state = 0; + bp->port.pmf = 0; + + bnx2x_prev_unload(bp); + + /* We should have reset the engine, so it's fair to + * assume the FW will no longer write to the bnx2x driver. + */ + bnx2x_squeeze_objects(bp); + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_fp_mem(bp); + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + } + rtnl_unlock();
return PCI_ERS_RESULT_RECOVERED; @@@ -12779,6 -12865,9 +12869,9 @@@ static void bnx2x_io_resume(struct pci_
bnx2x_eeh_recover(bp);
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + if (netif_running(dev)) bnx2x_nic_load(bp, LOAD_NORMAL);
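The bnx2x hooks touched above follow the standard PCI error-recovery contract (see Documentation/PCI/pci-error-recovery.txt): quiesce in .error_detected(), bring the device back in .slot_reset(), restart traffic in .resume(). A bare-bones skeleton under those assumptions; the my_* names are placeholders, not bnx2x's implementation:

#include <linux/pci.h>

static pci_ers_result_t my_io_error_detected(struct pci_dev *pdev,
					     enum pci_channel_state state)
{
	/* quiesce the device; a permanent failure cannot be recovered */
	return state == pci_channel_io_perm_failure ?
	       PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_io_slot_reset(struct pci_dev *pdev)
{
	/* re-enable and restore config space after the link reset */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_io_resume(struct pci_dev *pdev)
{
	/* traffic may flow again: reload device state, netif_device_attach() */
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_io_error_detected,
	.slot_reset	= my_io_slot_reset,
	.resume		= my_io_resume,
};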
@@@ -12801,6 -12890,9 +12894,9 @@@ static struct pci_driver bnx2x_pci_driv .suspend = bnx2x_suspend, .resume = bnx2x_resume, .err_handler = &bnx2x_err_handler, + #ifdef CONFIG_BNX2X_SRIOV + .sriov_configure = bnx2x_sriov_configure, + #endif };
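The new .sriov_configure entry is what makes 'echo N > /sys/bus/pci/devices/.../sriov_numvfs' work, replacing the probe-time-only enable that the earlier hunk dropped. The hook returns the number of VFs actually enabled, or a negative errno; a sketch with a hypothetical driver (error handling kept minimal):

#include <linux/pci.h>

static int my_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int rc;

	if (num_vfs == 0) {
		pci_disable_sriov(pdev);	/* tear down all VFs */
		return 0;
	}

	rc = pci_enable_sriov(pdev, num_vfs);
	return rc ? rc : num_vfs;	/* VFs enabled, or -errno */
}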
static int __init bnx2x_init(void) @@@ -13358,7 -13450,6 +13454,7 @@@ static int bnx2x_unregister_cnic(struc RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); + bp->cnic_enabled = false; kfree(bp->cnic_kwq); bp->cnic_kwq = NULL;
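The unregister path above is the classic RCU retire sequence: unpublish the pointer, wait one grace period, then free what late readers might still hold. Schematically, with illustrative types rather than bnx2x's:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ops_table {
	void (*handler)(void *data);
};

static struct ops_table __rcu *active_ops;

static void retire_ops(void)
{
	struct ops_table *old = rcu_dereference_protected(active_ops, 1);

	RCU_INIT_POINTER(active_ops, NULL);	/* unpublish */
	synchronize_rcu();		/* wait out in-flight readers */
	kfree(old);			/* no reader can reach it now */
}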
diff --combined drivers/net/ethernet/emulex/benet/be_main.c index 2886c9b,536afa2..c29ea70 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -1,5 -1,5 +1,5 @@@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@@ -146,20 -146,16 +146,16 @@@ static int be_queue_alloc(struct be_ada q->entry_size = entry_size; mem->size = len * entry_size; mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!mem->va) return -ENOMEM; return 0; }
- static void be_intr_set(struct be_adapter *adapter, bool enable) + static void be_reg_intr_set(struct be_adapter *adapter, bool enable) { u32 reg, enabled;
- if (adapter->eeh_error) - return; - pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, ®); enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; @@@ -175,6 -171,22 +171,22 @@@ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); }
+ static void be_intr_set(struct be_adapter *adapter, bool enable) + { + int status = 0; + + /* On lancer interrupts can't be controlled via this register */ + if (lancer_chip(adapter)) + return; + + if (adapter->eeh_error) + return; + + status = be_cmd_intr_set(adapter, enable); + if (status) + be_reg_intr_set(adapter, enable); + } + static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) { u32 val = 0; @@@ -759,9 -771,8 +771,9 @@@ static struct sk_buff *be_insert_vlan_i
if (vlan_tx_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); - __vlan_put_tag(skb, vlan_tag); - skb->vlan_tci = 0; + skb = __vlan_put_tag(skb, vlan_tag); + if (skb) + skb->vlan_tci = 0; }
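The reassignment in the hunk above matters because __vlan_put_tag() (two-argument form in this tree) may reallocate, or free, the skb and return NULL on failure, so continuing to use the old pointer would be a use-after-free. The safe calling shape, as a sketch:

#include <linux/if_vlan.h>

static struct sk_buff *tag_skb(struct sk_buff *skb, u16 vlan_tag)
{
	/* may copy-and-free the original skb; NULL means it is gone */
	skb = __vlan_put_tag(skb, vlan_tag);
	if (unlikely(!skb))
		return NULL;

	skb->vlan_tci = 0;	/* tag now lives in the payload */
	return skb;
}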
return skb; @@@ -2436,9 -2447,6 +2448,6 @@@ static int be_close(struct net_device *
be_roce_dev_close(adapter);
- if (!lancer_chip(adapter)) - be_intr_set(adapter, false); - for_all_evt_queues(adapter, eqo, i) napi_disable(&eqo->napi);
@@@ -2526,9 -2534,6 +2535,6 @@@ static int be_open(struct net_device *n
be_irq_register(adapter);
- if (!lancer_chip(adapter)) - be_intr_set(adapter, true); - for_all_rx_queues(adapter, rxo, i) be_cq_notify(adapter, rxo->cq.id, true, 0);
@@@ -2563,10 -2568,9 +2569,9 @@@ static int be_setup_wol(struct be_adapt
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (cmd.va == NULL) return -1; - memset(cmd.va, 0, cmd.size);
if (enable) { status = pci_write_config_dword(adapter->pdev, @@@ -3458,11 -3462,9 +3463,9 @@@ static int lancer_fw_download(struct be flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; - dev_err(&adapter->pdev->dev, - "Memory allocation failure while flashing\n"); goto lancer_fw_exit; }
@@@ -3564,8 -3566,6 +3567,6 @@@ static int be_fw_download(struct be_ada &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; - dev_err(&adapter->pdev->dev, - "Memory allocation failure while flashing\n"); goto be_fw_exit; }
@@@ -3792,12 -3792,13 +3793,13 @@@ static int be_ctrl_init(struct be_adapt
rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, - &rx_filter->dma, GFP_KERNEL); + &rx_filter->dma, + GFP_KERNEL | __GFP_ZERO); if (rx_filter->va == NULL) { status = -ENOMEM; goto free_mbox; } - memset(rx_filter->va, 0, rx_filter->size); + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); @@@ -3839,10 -3840,9 +3841,9 @@@ static int be_stats_init(struct be_adap cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (cmd->va == NULL) return -1; - memset(cmd->va, 0, cmd->size); return 0; }
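This is one of several conversions in the series (be_queue_alloc(), be_setup_wol(), be_ctrl_init() above, and the mvneta ring setup later) that fold a dma_alloc_coherent() plus memset() pair into a single zeroing allocation. The shape of the change, as a standalone sketch:

#include <linux/dma-mapping.h>

static void *alloc_cmd_buf(struct device *dev, size_t size,
			   dma_addr_t *handle)
{
	/* __GFP_ZERO returns zeroed memory, so no follow-up memset() */
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_ZERO);
}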
@@@ -3854,6 -3854,7 +3855,7 @@@ static void be_remove(struct pci_dev *p return;
be_roce_dev_remove(adapter); + be_intr_set(adapter, false);
cancel_delayed_work_sync(&adapter->func_recovery_work);
@@@ -4143,11 -4144,11 +4145,11 @@@ static int be_probe(struct pci_dev *pde goto ctrl_clean; }
- /* The INTR bit may be set in the card when probed by a kdump kernel - * after a crash. - */ - if (!lancer_chip(adapter)) - be_intr_set(adapter, false); + /* Wait for interrupts to quiesce after an FLR */ + msleep(100); + + /* Allow interrupts for other ULPs running on NIC function */ + be_intr_set(adapter, true);
status = be_stats_init(adapter); if (status) diff --combined drivers/net/ethernet/freescale/fec_main.c index 73195f6,2089087..2451ab1 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -29,7 -29,6 +29,6 @@@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> - #include <linux/pci.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> @@@ -53,11 -52,6 +52,6 @@@
#include <asm/cacheflush.h>
- #ifndef CONFIG_ARM - #include <asm/coldfire.h> - #include <asm/mcfsim.h> - #endif - #include "fec.h"
#if defined(CONFIG_ARM) @@@ -107,6 -101,9 +101,9 @@@ static struct platform_device_id fec_de .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX, }, { + .name = "mvf-fec", + .driver_data = FEC_QUIRK_ENET_MAC, + }, { /* sentinel */ } }; @@@ -117,6 -114,7 +114,7 @@@ enum imx_fec_type IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, + MVF_FEC, };
static const struct of_device_id fec_dt_ids[] = { @@@ -124,6 -122,7 +122,7 @@@ { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, + { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@@ -262,7 -261,7 +261,7 @@@ fec_enet_start_xmit(struct sk_buff *skb /* Ooops. All transmit buffers are full. Bail out. * This should not happen, since ndev->tbusy should be set. */ - printk("%s: tx queue full!.\n", ndev->name); + netdev_err(ndev, "tx queue full!\n"); return NETDEV_TX_BUSY; }
@@@ -574,7 -573,7 +573,7 @@@ fec_stop(struct net_device *ndev writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ udelay(10); if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) - printk("fec_stop : Graceful transmit stop did not complete !\n"); + netdev_err(ndev, "Graceful transmit stop did not complete!\n"); }
/* Whack a reset. We should wait for this. */ @@@ -672,7 -671,7 +671,7 @@@ fec_enet_tx(struct net_device *ndev }
if (status & BD_ENET_TX_READY) - printk("HEY! Enet xmit interrupt and TX_READY.\n"); + netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
/* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. @@@ -740,7 -739,7 +739,7 @@@ fec_enet_rx(struct net_device *ndev, in * the last indicator should be set. */ if ((status & BD_ENET_RX_LAST) == 0) - printk("FEC ENET: rcv is not +last\n"); + netdev_err(ndev, "rcv is not +last\n");
if (!fep->opened) goto rx_processing_done; @@@ -791,8 -790,6 +790,6 @@@ skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
if (unlikely(!skb)) { - printk("%s: Memory squeeze, dropping packet.\n", - ndev->name); ndev->stats.rx_dropped++; } else { skb_reserve(skb, NET_IP_ALIGN); @@@ -916,7 -913,6 +913,6 @@@ static void fec_get_mac(struct net_devi */ iap = macaddr;
- #ifdef CONFIG_OF /* * 2) from device tree data */ @@@ -928,7 -924,6 +924,6 @@@ iap = (unsigned char *) mac; } } - #endif
/* * 3) from flash or fuse (via platform data) @@@ -1002,7 -997,6 +997,7 @@@ static void fec_enet_adjust_link(struc } else { if (fep->link) { fec_stop(ndev); + fep->link = phy_dev->link; status_change = 1; } } @@@ -1032,7 -1026,7 +1027,7 @@@ static int fec_enet_mdio_read(struct mi usecs_to_jiffies(FEC_MII_TIMEOUT)); if (time_left == 0) { fep->mii_timeout = 1; - printk(KERN_ERR "FEC: MDIO read timeout\n"); + netdev_err(fep->netdev, "MDIO read timeout\n"); return -ETIMEDOUT; }
@@@ -1060,7 -1054,7 +1055,7 @@@ static int fec_enet_mdio_write(struct m usecs_to_jiffies(FEC_MII_TIMEOUT)); if (time_left == 0) { fep->mii_timeout = 1; - printk(KERN_ERR "FEC: MDIO write timeout\n"); + netdev_err(fep->netdev, "MDIO write timeout\n"); return -ETIMEDOUT; }
@@@ -1100,9 -1094,7 +1095,7 @@@ static int fec_enet_mii_probe(struct ne }
if (phy_id >= PHY_MAX_ADDR) { - printk(KERN_INFO - "%s: no PHY, assuming direct connection to switch\n", - ndev->name); + netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; } @@@ -1111,7 -1103,7 +1104,7 @@@ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, fep->phy_interface); if (IS_ERR(phy_dev)) { - printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name); + netdev_err(ndev, "could not attach to PHY\n"); return PTR_ERR(phy_dev); }
@@@ -1129,11 -1121,9 +1122,9 @@@ fep->link = 0; fep->full_duplex = 0;
- printk(KERN_INFO - "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - ndev->name, - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); + netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), + fep->phy_dev->irq);
return 0; } @@@ -1443,7 -1433,7 +1434,7 @@@ static int fec_enet_alloc_buffers(struc
if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = BD_ENET_TX_INT; }
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); @@@ -1608,7 -1598,7 +1599,7 @@@ fec_set_mac_address(struct net_device * * Polled functionality used by netconsole and others in non interrupt mode * */ - void fec_poll_controller(struct net_device *dev) + static void fec_poll_controller(struct net_device *dev) { int i; struct fec_enet_private *fep = netdev_priv(dev); @@@ -1649,11 -1639,9 +1640,9 @@@ static int fec_enet_init(struct net_dev
/* Allocate memory for buffer descriptors. */ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, - GFP_KERNEL); - if (!cbd_base) { - printk("FEC: allocate descriptor memory failed?\n"); + GFP_KERNEL); + if (!cbd_base) return -ENOMEM; - }
memset(cbd_base, 0, PAGE_SIZE); spin_lock_init(&fep->hw_lock); @@@ -1685,16 -1673,6 +1674,6 @@@ }
#ifdef CONFIG_OF - static int fec_get_phy_mode_dt(struct platform_device *pdev) - { - struct device_node *np = pdev->dev.of_node; - - if (np) - return of_get_phy_mode(np); - - return -ENODEV; - } - static void fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; @@@ -1723,11 -1701,6 +1702,6 @@@ gpio_set_value(phy_reset, 1); } #else /* CONFIG_OF */ - static int fec_get_phy_mode_dt(struct platform_device *pdev) - { - return -ENODEV; - } - static void fec_reset_phy(struct platform_device *pdev) { /* @@@ -1758,16 -1731,10 +1732,10 @@@ fec_probe(struct platform_device *pdev if (!r) return -ENXIO;
- r = request_mem_region(r->start, resource_size(r), pdev->name); - if (!r) - return -EBUSY; - /* Init network device */ ndev = alloc_etherdev(sizeof(struct fec_enet_private)); - if (!ndev) { - ret = -ENOMEM; - goto failed_alloc_etherdev; - } + if (!ndev) + return -ENOMEM;
SET_NETDEV_DEV(ndev, &pdev->dev);
@@@ -1779,7 -1746,7 +1747,7 @@@ (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
- fep->hwp = ioremap(r->start, resource_size(r)); + fep->hwp = devm_request_and_ioremap(&pdev->dev, r); fep->pdev = pdev; fep->dev_id = dev_id++;
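The devm_request_and_ioremap() switch here is what lets the request_mem_region()/ioremap() calls and their error-path and remove-path cleanups disappear further down: managed resources are released by the driver core when the device is unbound. Probe-side usage, sketched under the assumption of a platform device with one MEM resource:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *map_mmio(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* claims the region and maps it; both are auto-released on
	 * driver unbind, so no iounmap()/release_mem_region() needed */
	return r ? devm_request_and_ioremap(&pdev->dev, r) : NULL;
}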
@@@ -1792,7 -1759,7 +1760,7 @@@
platform_set_drvdata(pdev, ndev);
- ret = fec_get_phy_mode_dt(pdev); + ret = of_get_phy_mode(pdev->dev.of_node); if (ret < 0) { pdata = pdev->dev.platform_data; if (pdata) @@@ -1882,6 -1849,9 +1850,9 @@@ if (ret) goto failed_register;
+ if (fep->bufdesc_ex && fep->ptp_clock) + netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + return 0;
failed_register: @@@ -1901,11 -1871,8 +1872,8 @@@ failed_regulator clk_disable_unprepare(fep->clk_ptp); failed_pin: failed_clk: - iounmap(fep->hwp); failed_ioremap: free_netdev(ndev); - failed_alloc_etherdev: - release_mem_region(r->start, resource_size(r));
return ret; } @@@ -1915,7 -1882,6 +1883,6 @@@ fec_drv_remove(struct platform_device * { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - struct resource *r; int i;
unregister_netdev(ndev); @@@ -1931,19 -1897,14 +1898,14 @@@ if (irq > 0) free_irq(irq, ndev); } - iounmap(fep->hwp); free_netdev(ndev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - BUG_ON(!r); - release_mem_region(r->start, resource_size(r)); - platform_set_drvdata(pdev, NULL);
return 0; }
- #ifdef CONFIG_PM + #ifdef CONFIG_PM_SLEEP static int fec_suspend(struct device *dev) { @@@ -1975,24 -1936,15 +1937,15 @@@ fec_resume(struct device *dev
return 0; } + #endif /* CONFIG_PM_SLEEP */
- static const struct dev_pm_ops fec_pm_ops = { - .suspend = fec_suspend, - .resume = fec_resume, - .freeze = fec_suspend, - .thaw = fec_resume, - .poweroff = fec_suspend, - .restore = fec_resume, - }; - #endif + static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
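SIMPLE_DEV_PM_OPS() generates roughly the structure deleted above, reusing the suspend/resume pair for freeze/thaw and poweroff/restore, and its system-sleep slots compile away when CONFIG_PM_SLEEP is off, which is why the surrounding #ifdef can go too. Approximately (simplified; the exact expansion lives in include/linux/pm.h):

static const struct dev_pm_ops fec_pm_ops = {
	.suspend  = fec_suspend,	/* only set under CONFIG_PM_SLEEP */
	.resume   = fec_resume,
	.freeze   = fec_suspend,
	.thaw     = fec_resume,
	.poweroff = fec_suspend,
	.restore  = fec_resume,
};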
static struct platform_driver fec_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, - #ifdef CONFIG_PM .pm = &fec_pm_ops, - #endif .of_match_table = fec_dt_ids, }, .id_table = fec_devtype, diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 97e3366,b3e6530..7e977d8 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@@ -661,13 -661,7 +661,7 @@@ int ixgbe_vf_configuration(struct pci_d bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) { - eth_random_addr(vf_mac_addr); - e_info(probe, "IOV: VF %d is enabled MAC %pM\n", - vfn, vf_mac_addr); - /* - * Store away the VF "permananet" MAC address, it will ask - * for it later. - */ + eth_zero_addr(vf_mac_addr); memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); }
@@@ -688,7 -682,8 +682,8 @@@ static int ixgbe_vf_reset_msg(struct ix ixgbe_vf_reset_event(adapter, vf);
/* set vf mac address */ - ixgbe_set_vf_mac(adapter, vf, vf_mac); + if (!is_zero_ether_addr(vf_mac)) + ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32; reg_offset = vf / 32; @@@ -729,8 -724,16 +724,16 @@@ IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/* reply to reset with ack and vf mac address */ - msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, ETH_ALEN); + msgbuf[0] = IXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + dev_warn(&adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + }
/* * Piggyback the multicast filter type so VF can compute the @@@ -1049,12 -1052,6 +1052,12 @@@ int ixgbe_ndo_set_vf_vlan(struct net_de if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, + vf); + if (err) + goto out; err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; diff --combined drivers/net/ethernet/marvell/Kconfig index 434e33c,0051f0e..a49e81b --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@@ -21,8 -21,8 +21,8 @@@ if NET_VENDOR_MARVEL config MV643XX_ETH tristate "Marvell Discovery (643XX) and Orion ethernet support" depends on (MV64X60 || PPC32 || PLAT_ORION) && INET - select INET_LRO select PHYLIB + select MVMDIO ---help--- This driver supports the gigabit ethernet MACs in the Marvell Discovery PPC/MIPS chipset family (MV643XX) and @@@ -33,19 -33,17 +33,17 @@@
config MVMDIO tristate "Marvell MDIO interface support" + select PHYLIB ---help--- This driver supports the MDIO interface found in the network interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, Dove, Armada 370 and Armada XP).
- For now, this driver is only needed for the MVNETA driver - (used on Armada 370 and XP), but it could be used in the - future by the MV643XX_ETH driver. + This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA tristate "Marvell Armada 370/XP network interface support" depends on MACH_ARMADA_370_XP - select PHYLIB select MVMDIO ---help--- This driver supports the network interface units in the diff --combined drivers/net/ethernet/marvell/mvneta.c index a47a097,e48261e..c966785 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -374,6 -374,7 +374,6 @@@ static int rxq_number = 8 static int txq_number = 8;
static int rxq_def; -static int txq_def;
#define MVNETA_DRIVER_NAME "mvneta" #define MVNETA_DRIVER_VERSION "1.0" @@@ -1474,8 -1475,7 +1474,8 @@@ error static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; + u16 txq_id = skb_get_queue_mapping(skb); + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_desc *tx_desc; struct netdev_queue *nq; int frags = 0; @@@ -1485,7 -1485,7 +1485,7 @@@ goto out;
frags = skb_shinfo(skb)->nr_frags + 1; - nq = netdev_get_tx_queue(dev, txq_def); + nq = netdev_get_tx_queue(dev, txq_id);
/* Get a descriptor for the first part of the packet */ tx_desc = mvneta_txq_next_desc_get(txq); @@@ -1969,13 -1969,8 +1969,8 @@@ static int mvneta_rxq_init(struct mvnet rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, &rxq->descs_phys, GFP_KERNEL); - if (rxq->descs == NULL) { - netdev_err(pp->dev, - "rxq=%d: Can't allocate %d bytes for %d RX descr\n", - rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE, - rxq->size); + if (rxq->descs == NULL) return -ENOMEM; - }
BUG_ON(rxq->descs != PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); @@@ -2029,13 -2024,8 +2024,8 @@@ static int mvneta_txq_init(struct mvnet txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, &txq->descs_phys, GFP_KERNEL); - if (txq->descs == NULL) { - netdev_err(pp->dev, - "txQ=%d: Can't allocate %d bytes for %d TX descr\n", - txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE, - txq->size); + if (txq->descs == NULL) return -ENOMEM; - }
/* Make sure descriptor address is cache line size aligned */ BUG_ON(txq->descs != @@@ -2689,7 -2679,7 +2679,7 @@@ static int mvneta_probe(struct platform return -EINVAL; }
- dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); + dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); if (!dev) return -ENOMEM;
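alloc_etherdev_mqs() sizes the netdev's TX and RX queue arrays from the module parameters instead of a fixed 8, which is what makes the skb_get_queue_mapping() lookup in mvneta_tx() above safe for any configured count. The pairing, sketched with a hypothetical my_xmit():

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* any value here is valid because the netdev was created with
	 * alloc_etherdev_mqs(priv_size, txq_number, rxq_number) */
	u16 txq_id = skb_get_queue_mapping(skb);
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

	if (netif_tx_queue_stopped(nq))
		return NETDEV_TX_BUSY;

	/* ... fill hardware descriptors for ring txq_id ... */
	return NETDEV_TX_OK;
}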
@@@ -2771,17 -2761,16 +2761,17 @@@
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; + dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->priv_flags |= IFF_UNICAST_FLT; + err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register\n"); goto err_deinit; }
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; - dev->priv_flags |= IFF_UNICAST_FLT; - netdev_info(dev, "mac: %pM\n", dev->dev_addr);
platform_set_drvdata(pdev, pp->dev); @@@ -2844,3 -2833,4 +2834,3 @@@ module_param(rxq_number, int, S_IRUGO) module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO); -module_param(txq_def, int, S_IRUGO); diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index edd63f1,374fa8a..dcdc624 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@@ -6,6 -6,7 +6,7 @@@ */
#include "qlcnic.h" + #include "qlcnic_sriov.h" #include <linux/if_vlan.h> #include <linux/ipv6.h> #include <linux/ethtool.h> @@@ -13,100 -14,7 +14,7 @@@
#define QLCNIC_MAX_TX_QUEUES 1 #define RSS_HASHTYPE_IP_TCP 0x3 - - /* status descriptor mailbox data - * @phy_addr: physical address of buffer - * @sds_ring_size: buffer size - * @intrpt_id: interrupt id - * @intrpt_val: source of interrupt - */ - struct qlcnic_sds_mbx { - u64 phy_addr; - u8 rsvd1[16]; - u16 sds_ring_size; - u16 rsvd2[3]; - u16 intrpt_id; - u8 intrpt_val; - u8 rsvd3[5]; - } __packed; - - /* receive descriptor buffer data - * phy_addr_reg: physical address of regular buffer - * phy_addr_jmb: physical address of jumbo buffer - * reg_ring_sz: size of regular buffer - * reg_ring_len: no. of entries in regular buffer - * jmb_ring_len: no. of entries in jumbo buffer - * jmb_ring_sz: size of jumbo buffer - */ - struct qlcnic_rds_mbx { - u64 phy_addr_reg; - u64 phy_addr_jmb; - u16 reg_ring_sz; - u16 reg_ring_len; - u16 jmb_ring_sz; - u16 jmb_ring_len; - } __packed; - - /* host producers for regular and jumbo rings */ - struct __host_producer_mbx { - u32 reg_buf; - u32 jmb_buf; - } __packed; - - /* Receive context mailbox data outbox registers - * @state: state of the context - * @vport_id: virtual port id - * @context_id: receive context id - * @num_pci_func: number of pci functions of the port - * @phy_port: physical port id - */ - struct qlcnic_rcv_mbx_out { - u8 rcv_num; - u8 sts_num; - u16 ctx_id; - u8 state; - u8 num_pci_func; - u8 phy_port; - u8 vport_id; - u32 host_csmr[QLCNIC_MAX_RING_SETS]; - struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; - } __packed; - - struct qlcnic_add_rings_mbx_out { - u8 rcv_num; - u8 sts_num; - u16 ctx_id; - u32 host_csmr[QLCNIC_MAX_RING_SETS]; - struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; - } __packed; - - /* Transmit context mailbox inbox registers - * @phys_addr: DMA address of the transmit buffer - * @cnsmr_index: host consumer index - * @size: legth of transmit buffer ring - * @intr_id: interrput id - * @src: src of interrupt - */ - struct qlcnic_tx_mbx { - u64 phys_addr; - u64 cnsmr_index; - u16 size; - u16 intr_id; - u8 src; - u8 rsvd[3]; - } __packed; - - /* Transmit context mailbox outbox registers - * @host_prod: host producer index - * @ctx_id: transmit context id - * @state: state of the transmit context - */ - struct qlcnic_tx_mbx_out { - u32 host_prod; - u16 ctx_id; - u8 state; - u8 rsvd; - } __packed; + #define QLC_83XX_FW_MBX_CMD 0
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, @@@ -156,9 -64,11 +64,11 @@@ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, + {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, + {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, };
- static const u32 qlcnic_83xx_ext_reg_tbl[] = { + const u32 qlcnic_83xx_ext_reg_tbl[] = { 0x38CC, /* Global Reset */ 0x38F0, /* Wildcard */ 0x38FC, /* Informant */ @@@ -204,7 -114,7 +114,7 @@@ 0x34A4, /* QLC_83XX_ASIC_TEMP */ };
- static const u32 qlcnic_83xx_reg_tbl[] = { + const u32 qlcnic_83xx_reg_tbl[] = { 0x34A8, /* PEG_HALT_STAT1 */ 0x34AC, /* PEG_HALT_STAT2 */ 0x34B0, /* FW_HEARTBEAT */ @@@ -247,6 -157,8 +157,8 @@@ static struct qlcnic_hardware_ops qlcni .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, .create_rx_ctx = qlcnic_83xx_create_rx_ctx, .create_tx_ctx = qlcnic_83xx_create_tx_ctx, + .del_rx_ctx = qlcnic_83xx_del_rx_ctx, + .del_tx_ctx = qlcnic_83xx_del_tx_ctx, .setup_link_event = qlcnic_83xx_setup_link_event, .get_nic_info = qlcnic_83xx_get_nic_info, .get_pci_info = qlcnic_83xx_get_pci_info, @@@ -355,14 -267,20 +267,20 @@@ int qlcnic_83xx_setup_intr(struct qlcni num_intr)); /* account for AEN interrupt MSI-X based interrupts */ num_msix += 1; - num_msix += adapter->max_drv_tx_rings; + + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + num_msix += adapter->max_drv_tx_rings; + err = qlcnic_enable_msix(adapter, num_msix); if (err == -ENOMEM) return err; if (adapter->flags & QLCNIC_MSIX_ENABLED) num_msix = adapter->ahw->num_msix; - else + else { + if (qlcnic_sriov_vf_check(adapter)) + return -EINVAL; num_msix = 1; + } /* setup interrupt mapping table for fw */ ahw->intr_tbl = vzalloc(num_msix * sizeof(struct qlcnic_intrpt_config)); @@@ -595,7 -513,7 +513,7 @@@ int qlcnic_83xx_setup_mbx_intr(struct q void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) { u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); - adapter->ahw->pci_func = val & 0xf; + adapter->ahw->pci_func = (val >> 24) & 0xff; }
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) @@@ -707,6 -625,11 +625,11 @@@ void qlcnic_83xx_check_vf(struct qlcnic ahw->fw_hal_version = 2; qlcnic_get_func_no(adapter);
+ if (qlcnic_sriov_vf_check(adapter)) { + qlcnic_sriov_vf_set_ops(adapter); + return; + } + /* Determine function privilege level */ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); if (op_mode == QLC_83XX_DEFAULT_OPMODE) @@@ -722,6 -645,9 +645,9 @@@ ahw->fw_hal_version); adapter->nic_ops = &qlcnic_vf_ops; } else { + if (pci_find_ext_capability(adapter->pdev, + PCI_EXT_CAP_ID_SRIOV)) + set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state); adapter->nic_ops = &qlcnic_83xx_ops; } } @@@ -755,7 -681,7 +681,7 @@@ static void qlcnic_dump_mbx(struct qlcn }
/* Mailbox response for mac rcode */ - static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) + u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) { u32 fw_data; u8 mac_cmd_rcode; @@@ -769,7 -695,7 +695,7 @@@ return 1; }
- static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) + u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) { u32 data; unsigned long wait_time = 0; @@@ -884,6 -810,7 +810,7 @@@ int qlcnic_83xx_alloc_mbx_args(struct q size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); for (i = 0; i < size; i++) { if (type == mbx_tbl[i].cmd) { + mbx->op_type = QLC_83XX_FW_MBX_CMD; mbx->req.num = mbx_tbl[i].in_args; mbx->rsp.num = mbx_tbl[i].out_args; mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), @@@ -901,10 -828,10 +828,10 @@@ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); temp = adapter->ahw->fw_hal_version << 29; mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); - break; + return 0; } } - return 0; + return -EINVAL; }
void qlcnic_83xx_idc_aen_work(struct work_struct *work) @@@ -960,6 -887,9 +887,9 @@@ void qlcnic_83xx_process_aen(struct qlc break; case QLCNIC_MBX_TIME_EXTEND_EVENT: break; + case QLCNIC_MBX_BC_EVENT: + qlcnic_sriov_handle_bc_event(adapter, event[1]); + break; case QLCNIC_MBX_SFP_INSERT_EVENT: dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); @@@ -1004,7 -934,8 +934,8 @@@ static int qlcnic_83xx_add_rings(struc sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); - sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED) @@@ -1050,6 -981,32 +981,32 @@@ out return err; }
+ void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter) + { + int err; + u32 temp = 0; + struct qlcnic_cmd_args cmd; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) + return; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp); + + cmd.req.arg[1] = recv_ctx->context_id | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to destroy rx ctx in firmware\n"); + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; + qlcnic_free_mbx_args(&cmd); + } + int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) { int i, err, index, sds_mbx_size, rds_mbx_size; @@@ -1080,9 -1037,17 +1037,17 @@@ /* set mailbox hdr and capabilities */ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + cmd.req.arg[1] = cap; cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_create_rx_ctx(adapter, + &cmd.req.arg[6]); /* set up status rings, mbx 8-57/87 */ index = QLC_83XX_HOST_SDS_MBX_IDX; for (i = 0; i < num_sds; i++) { @@@ -1090,7 -1055,8 +1055,8 @@@ sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); - sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[i].id; @@@ -1110,13 -1076,15 +1076,15 @@@ rds = &recv_ctx->rds_rings[0]; rds->producer = 0; memset(&rds_mbx, 0, rds_mbx_size); - rds_mbx.phy_addr_reg = rds->phys_addr; + rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr); rds_mbx.reg_ring_sz = rds->dma_size; rds_mbx.reg_ring_len = rds->num_desc; /* Jumbo ring */ rds = &recv_ctx->rds_rings[1]; rds->producer = 0; - rds_mbx.phy_addr_jmb = rds->phys_addr; + rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr); rds_mbx.jmb_ring_sz = rds->dma_size; rds_mbx.jmb_ring_len = rds->num_desc; buf = &cmd.req.arg[index]; @@@ -1163,16 -1131,39 +1131,39 @@@ out return err; }
+ void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) + { + struct qlcnic_cmd_args cmd; + u32 temp = 0; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX)) + return; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp); + + cmd.req.arg[1] = tx_ring->ctx_id | temp; + if (qlcnic_issue_cmd(adapter, &cmd)) + dev_err(&adapter->pdev->dev, + "Failed to destroy tx ctx in firmware\n"); + qlcnic_free_mbx_args(&cmd); + } + int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx, int ring) { int err; u16 msix_id; - u32 *buf, intr_mask; + u32 *buf, intr_mask, temp = 0; struct qlcnic_cmd_args cmd; struct qlcnic_tx_mbx mbx; struct qlcnic_tx_mbx_out *mbx_out; struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 msix_vector;
/* Reset host resources */ tx->producer = 0; @@@ -1182,13 -1173,21 +1173,21 @@@ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
/* setup mailbox inbox registers */ - mbx.phys_addr = tx->phys_addr; - mbx.cnsmr_index = tx->hw_cons_phys_addr; + mbx.phys_addr_low = LSD(tx->phys_addr); + mbx.phys_addr_high = MSD(tx->phys_addr); + mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr); + mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr); mbx.size = tx->num_desc; - if (adapter->flags & QLCNIC_MSIX_ENABLED) - msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id; - else + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + msix_vector = adapter->max_sds_rings + ring; + else + msix_vector = adapter->max_sds_rings - 1; + msix_id = ahw->intr_tbl[msix_vector].id; + } else { + msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + } + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) mbx.intr_id = msix_id; else @@@ -1196,8 -1195,15 +1195,15 @@@ mbx.src = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp); + cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; - cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES; + cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp; buf = &cmd.req.arg[6]; memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); /* send the mailbox command*/ @@@ -1210,7 -1216,8 +1216,8 @@@ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; tx->ctx_id = mbx_out->ctx_id; - if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src; tx->crb_intr_mask = ahw->pci_base0 + intr_mask; } @@@ -1373,12 -1380,60 +1380,60 @@@ mbx_err } }
+ int qlcnic_83xx_set_led(struct net_device *netdev, + enum ethtool_phys_id_state state) + { + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = -EIO, active = 1; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, + "LED test is not supported in non-privileged mode\n"); + return -EOPNOTSUPP; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) + return -EBUSY; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to set LED blink state\n"); + break; + case ETHTOOL_ID_INACTIVE: + active = 0; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to reset LED blink state\n"); + break; + + default: + return -EINVAL; + } + + if (!active || err) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + return err; + } + void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_cmd_args cmd; int status;
+ if (qlcnic_sriov_vf_check(adapter)) + return; + if (enable) { qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); cmd.req.arg[1] = BIT_0 | BIT_31; @@@ -1441,24 -1496,35 +1496,35 @@@ int qlcnic_83xx_setup_link_event(struc return err; }
+ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, + u32 *interface_id) + { + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_promisc(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } + } + int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { int err; - u32 temp; + u32 temp = 0; struct qlcnic_cmd_args cmd;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); - temp = adapter->recv_ctx->context_id << 16; + qlcnic_83xx_set_interface_id_promisc(adapter, &temp); cmd.req.arg[1] = (mode ? 1 : 0) | temp; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Promiscuous mode config failed\n"); - qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(&cmd); return err; }
@@@ -1500,12 -1566,6 +1566,12 @@@ int qlcnic_83xx_loopback_test(struct ne } } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+ /* Make sure carrier is off and queue is stopped during loopback */ + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode); @@@ -1604,21 -1664,31 +1670,31 @@@ int qlcnic_83xx_clear_lb_mode(struct ql return status; }
+ static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, + u32 *interface_id) + { + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } + } + void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int mode) { int err; - u32 temp, temp_ip; + u32 temp = 0, temp_ip; struct qlcnic_cmd_args cmd;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); - if (mode == QLCNIC_IP_UP) { - temp = adapter->recv_ctx->context_id << 16; + qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); + + if (mode == QLCNIC_IP_UP) cmd.req.arg[1] = 1 | temp; - } else { - temp = adapter->recv_ctx->context_id << 16; + else cmd.req.arg[1] = 2 | temp; - }
/* * Adapter needs IP address in network byte order. @@@ -1635,6 -1705,7 +1711,7 @@@ dev_err(&adapter->netdev->dev, "could not notify %s IP 0x%x request\n", (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); + qlcnic_free_mbx_args(&cmd); }
@@@ -1701,11 -1772,22 +1778,22 @@@ int qlcnic_83xx_config_rss(struct qlcni
}
+ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter, + u32 *interface_id) + { + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_macaddr(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } + } + int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, __le16 vlan_id, u8 op) { int err; - u32 *buf; + u32 *buf, temp = 0; struct qlcnic_cmd_args cmd; struct qlcnic_macvlan_mbx mv;
@@@ -1715,11 -1797,17 +1803,17 @@@ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); if (err) return err; - cmd.req.arg[1] = op | (1 << 8) | - (adapter->recv_ctx->context_id << 16);
+ cmd.req.arg[1] = op | (1 << 8); + qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); + cmd.req.arg[1] |= temp; mv.vlan = le16_to_cpu(vlan_id); - memcpy(&mv.mac, addr, ETH_ALEN); + mv.mac_addr0 = addr[0]; + mv.mac_addr1 = addr[1]; + mv.mac_addr2 = addr[2]; + mv.mac_addr3 = addr[3]; + mv.mac_addr4 = addr[4]; + mv.mac_addr5 = addr[5]; buf = &cmd.req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); err = qlcnic_issue_cmd(adapter, &cmd); @@@ -2008,14 -2096,17 +2102,17 @@@ int qlcnic_83xx_get_pci_info(struct qlc int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) { int i, index, err; - bool type; u8 max_ints; - u32 val, temp; + u32 val, temp, type; struct qlcnic_cmd_args cmd;
max_ints = adapter->ahw->num_msix - 1; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); cmd.req.arg[1] = max_ints; + + if (qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16; + for (i = 0, index = 2; i < max_ints; i++) { type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; val = type | (adapter->ahw->intr_tbl[i].type << 4); @@@ -2169,7 -2260,7 +2266,7 @@@ static int qlcnic_83xx_poll_flash_statu return 0; }
- static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter) + int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter) { int ret; u32 cmd; @@@ -2187,7 -2278,7 +2284,7 @@@ return 0; }
- static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter) + int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter) { int ret;
@@@ -2261,7 -2352,7 +2358,7 @@@ int qlcnic_83xx_erase_flash_sector(stru return -EIO;
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { - ret = qlcnic_83xx_enable_flash_write_op(adapter); + ret = qlcnic_83xx_enable_flash_write(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, @@@ -2303,7 -2394,7 +2400,7 @@@ }
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { - ret = qlcnic_83xx_disable_flash_write_op(adapter); + ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, @@@ -2343,8 -2434,8 +2440,8 @@@ int qlcnic_83xx_flash_bulk_write(struc u32 temp; int ret = -EIO;
- if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) || - (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) { + if ((count < QLC_83XX_FLASH_WRITE_MIN) || + (count > QLC_83XX_FLASH_WRITE_MAX)) { dev_err(&adapter->pdev->dev, "%s: Invalid word count\n", __func__); return -EIO; @@@ -2622,13 -2713,19 +2719,19 @@@ int qlcnic_83xx_flash_read32(struct qlc
int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) { + u8 pci_func; int err; u32 config = 0, state; struct qlcnic_cmd_args cmd; struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func)); - if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) { + if (qlcnic_sriov_vf_check(adapter)) + pci_func = adapter->portnum; + else + pci_func = ahw->pci_func; + + state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func)); + if (!QLC_83xx_FUNC_VAL(state, pci_func)) { dev_info(&adapter->pdev->dev, "link state down\n"); return config; } @@@ -2786,7 -2883,6 +2889,7 @@@ static u64 *qlcnic_83xx_fill_stats(stru void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) { struct qlcnic_cmd_args cmd; + struct net_device *netdev = adapter->netdev; int ret = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); @@@ -2796,7 -2892,7 +2899,7 @@@ data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_TX, &ret); if (ret) { - dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); + netdev_err(netdev, "Error getting Tx stats\n"); goto out; } /* Get MAC stats */ @@@ -2806,7 -2902,8 +2909,7 @@@ data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_MAC, &ret); if (ret) { - dev_info(&adapter->pdev->dev, - "Error getting Rx stats\n"); + netdev_err(netdev, "Error getting MAC stats\n"); goto out; } /* Get Rx stats */ @@@ -2816,7 -2913,8 +2919,7 @@@ data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_RX, &ret); if (ret) - dev_info(&adapter->pdev->dev, - "Error getting Tx stats\n"); + netdev_err(netdev, "Error getting Rx stats\n"); out: qlcnic_free_mbx_args(&cmd); } diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 5fa847f,a85ca63a..910346d --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@@ -9,6 -9,7 +9,7 @@@ #include <linux/if_vlan.h> #include <net/ip.h> #include <linux/ipv6.h> + #include <net/checksum.h>
#include "qlcnic.h"
@@@ -146,7 -147,10 +147,10 @@@ static inline u8 qlcnic_mac_hash(u64 ma static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, u16 handle, u8 ring_id) { - if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) + unsigned short device = adapter->pdev->device; + + if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) return handle | (ring_id << 15); else return handle; @@@ -358,7 -362,8 +362,7 @@@ set_flags memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); } opcode = TX_ETHER_PKT; - if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && - skb_shinfo(skb)->gso_size > 0) { + if (skb_is_gso(skb)) { hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->total_hdr_length = hdr_len; @@@ -1131,9 -1136,8 +1135,8 @@@ qlcnic_process_lro(struct qlcnic_adapte iph = (struct iphdr *)skb->data; th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); }
th->psh = push; @@@ -1594,9 -1598,8 +1597,8 @@@ qlcnic_83xx_process_lro(struct qlcnic_a iph = (struct iphdr *)skb->data; th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); }
th->psh = push; @@@ -1691,6 -1694,29 +1693,29 @@@ skip return count; }
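csum_replace2(), used in both LRO paths above, applies the incremental update from RFC 1624: it folds the old and new 16-bit words into the existing checksum instead of zeroing iph->check and recomputing the whole header sum. In isolation:

#include <linux/ip.h>
#include <net/checksum.h>

static void lro_set_tot_len(struct iphdr *iph, u16 tot_len)
{
	/* adjust iph->check for the tot_len change, no full recompute */
	csum_replace2(&iph->check, iph->tot_len, htons(tot_len));
	iph->tot_len = htons(tot_len);
}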
+ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) + { + int tx_complete; + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + /* tx ring count = 1 */ + tx_ring = adapter->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + qlcnic_83xx_enable_intr(adapter, sds_ring); + } + + return work_done; + } + static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) { int tx_complete; @@@ -1768,7 -1794,8 +1793,8 @@@ void qlcnic_83xx_napi_enable(struct qlc qlcnic_83xx_enable_intr(adapter, sds_ring); }
- if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; napi_enable(&tx_ring->napi); @@@ -1795,7 -1822,8 +1821,8 @@@ void qlcnic_83xx_napi_disable(struct ql napi_disable(&sds_ring->napi); }
- if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; qlcnic_83xx_disable_tx_intr(adapter, tx_ring); @@@ -1808,7 -1836,7 +1835,7 @@@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) { - int ring, max_sds_rings; + int ring, max_sds_rings, temp; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; @@@ -1819,14 -1847,23 +1846,23 @@@ max_sds_rings = adapter->max_sds_rings; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; - if (adapter->flags & QLCNIC_MSIX_ENABLED) - netif_napi_add(netdev, &sds_ring->napi, - qlcnic_83xx_rx_poll, - QLCNIC_NETDEV_WEIGHT * 2); - else + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_rx_poll, + QLCNIC_NETDEV_WEIGHT * 2); + } else { + temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings; + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_msix_sriov_vf_poll, + temp); + } + + } else { netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_poll, QLCNIC_NETDEV_WEIGHT / max_sds_rings); + } }
if (qlcnic_alloc_tx_rings(adapter, netdev)) { @@@ -1834,7 -1871,8 +1870,8 @@@ return -ENOMEM; }
- if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, @@@ -1860,7 -1898,8 +1897,8 @@@ void qlcnic_83xx_napi_del(struct qlcnic
qlcnic_free_sds_rings(adapter->recv_ctx);
- if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 5ef328a,c77675d..4e22e79 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@@ -21,8 -21,6 +21,6 @@@ #include <linux/aer.h> #include <linux/log2.h>
- #include <linux/sysfs.h> - #define QLC_STATUS_UNSUPPORTED_CMD -2
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) @@@ -200,10 -198,10 +198,10 @@@ beacon_err }
err = qlcnic_config_led(adapter, b_state, b_rate); - if (!err) + if (!err) { err = len; - else ahw->beacon_state = b_state; + }
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, max_sds_rings); @@@ -886,6 -884,244 +884,244 @@@ static ssize_t qlcnic_sysfs_read_pci_co return size; }
+ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) + { + unsigned char *p_read_buf; + int ret, count; + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + if (!size) + return QL_STATUS_INVALID_PARAM; + if (!buf) + return QL_STATUS_INVALID_PARAM; + + count = size / sizeof(u32); + + if (size % sizeof(u32)) + count++; + + p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_read_buf) + return -ENOMEM; + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_read_buf); + return -EIO; + } + + ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf, + count); + + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + kfree(p_read_buf); + return ret; + } + + qlcnic_83xx_unlock_flash(adapter); + memcpy(buf, p_read_buf, size); + kfree(p_read_buf); + + return size; + } + + static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, + char *buf, loff_t offset, + size_t size) + { + int i, ret, count; + unsigned char *p_cache, *p_src; + + p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_cache) + return -ENOMEM; + + memcpy(p_cache, buf, size); + p_src = p_cache; + count = size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_cache); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) { + ret = qlcnic_83xx_flash_bulk_write(adapter, offset, + (u32 *)p_src, + QLC_83XX_FLASH_WRITE_MAX); + + if (ret) { + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; + offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + + return 0; + } + + static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, + char *buf, loff_t offset, size_t size) + { + int i, ret, count; + unsigned char *p_cache, *p_src; + + p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_cache) + return -ENOMEM; + + memcpy(p_cache, buf, size); + p_src = p_cache; + count = size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_cache); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + for (i = 0; i < count; i++) { + ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src); + if (ret) { + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32); + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + + return 0; + } + + static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) + { + int ret; + static int flash_mode; + unsigned long data; + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + if (!buf) + return QL_STATUS_INVALID_PARAM; + + ret = kstrtoul(buf, 16, &data); + + switch (data) { + case QLC_83XX_FLASH_SECTOR_ERASE_CMD: + flash_mode = QLC_83XX_ERASE_MODE; + ret = qlcnic_83xx_erase_flash_sector(adapter, offset); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", __func__, __LINE__); + return -EIO; + } + break; + + case QLC_83XX_FLASH_BULK_WRITE_CMD: + flash_mode = QLC_83XX_BULK_WRITE_MODE; + break; + + case QLC_83XX_FLASH_WRITE_CMD: + flash_mode = QLC_83XX_WRITE_MODE; + break; + default: + if (flash_mode == QLC_83XX_BULK_WRITE_MODE) { + ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf, + offset, size); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", + __func__, __LINE__); + return -EIO; + } + } + + if (flash_mode == QLC_83XX_WRITE_MODE) { + ret = qlcnic_83xx_sysfs_flash_write(adapter, buf, + offset, size); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", __func__, + __LINE__); + return -EIO; + } + } + } + + return size; + } + static struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_bridged_mode, @@@ -960,6 -1196,13 +1196,13 @@@ static struct bin_attribute bin_attr_pm .write = qlcnic_sysfs_write_pm_config, };
+ static struct bin_attribute bin_attr_flash = { + .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_83xx_sysfs_flash_read_handler, + .write = qlcnic_83xx_sysfs_flash_write_handler, + }; + void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; @@@ -1048,10 -1291,18 +1291,18 @@@ void qlcnic_82xx_remove_sysfs(struct ql
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; + qlcnic_create_diag_entries(adapter); + + if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash)) + dev_info(dev, "failed to create flash sysfs entry\n"); }
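The read handler above rounds the byte count up to whole u32 words (the open-coded count computation is equivalent to DIV_ROUND_UP(size, sizeof(u32))) and copies through a bounce buffer while holding the flash lock. Since "flash" is exposed as a sysfs binary attribute, the file offset maps directly to a flash offset. A minimal userspace read sketch; the PCI device path is hypothetical and the write-side command encodings (which live in the qlcnic headers) are not shown:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char data[256];
            /* hypothetical sysfs path; the last component matches bin_attr_flash */
            int fd = open("/sys/bus/pci/devices/0000:01:00.0/flash", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* the pread offset becomes the flash offset seen by the read handler */
            if (pread(fd, data, sizeof(data), 0x10000) != (ssize_t)sizeof(data))
                    perror("pread");
            close(fd);
            return 0;
    }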
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; + qlcnic_remove_diag_entries(adapter); + sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash); } diff --combined drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8033555,1dd778a..2ff776d --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@@ -1211,8 -1211,6 +1211,6 @@@ static void ql_update_sbq(struct ql_ada netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE); if (sbq_desc->p.skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "Couldn't get an skb.\n"); rx_ring->sbq_clean_idx = clean_idx; return; } @@@ -1434,13 -1432,11 +1432,13 @@@ map_error }
/* Categorizing receive firmware frame errors */ -static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) +static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, + struct rx_ring *rx_ring) { struct nic_stats *stats = &qdev->nic_stats;
stats->rx_err_count++; + rx_ring->rx_errors++;
switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { case IB_MAC_IOCB_RSP_ERR_CODE_ERR: @@@ -1476,12 -1472,6 +1474,12 @@@ static void ql_process_mac_rx_gro_page( struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi;
+ /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + put_page(lbq_desc->p.pg_chunk.page); + return; + } napi->dev = qdev->ndev;
skb = napi_get_frags(napi); @@@ -1527,8 -1517,6 +1525,6 @@@ static void ql_process_mac_rx_page(stru
skb = netdev_alloc_skb(ndev, length); if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, need to unwind!.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; @@@ -1537,12 -1525,6 +1533,12 @@@ addr = lbq_desc->p.pg_chunk.va; prefetch(addr);
+ /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + goto err_out; + } + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ @@@ -1619,8 -1601,6 +1615,6 @@@ static void ql_process_mac_rx_skb(struc /* Allocate new_skb and copy */ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); if (new_skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "No skb available, drop the packet.\n"); rx_ring->rx_dropped++; return; } @@@ -1628,13 -1608,6 +1622,13 @@@ memcpy(skb_put(new_skb, length), skb->data, length); skb = new_skb;
+ /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); @@@ -1940,13 -1913,6 +1934,13 @@@ static void ql_process_mac_split_rx_int return; }
+ /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ @@@ -2028,6 -1994,12 +2022,6 @@@ static unsigned long ql_process_mac_rx_
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
- /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); - return (unsigned long)length; - } - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { /* The data and headers are split into * separate buffers. diff --combined drivers/net/ethernet/sfc/falcon.c index defed0e,4486102..71998e7 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@@ -1528,7 -1528,7 +1528,7 @@@ static int falcon_probe_nic(struct efx_ return 0;
fail6: - BUG_ON(i2c_del_adapter(&board->i2c_adap)); + i2c_del_adapter(&board->i2c_adap); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); fail5: efx_nic_free_buffer(efx, &efx->irq_status); @@@ -1546,10 -1546,6 +1546,6 @@@
static void falcon_init_rx_cfg(struct efx_nic *efx) { - /* Prior to Siena the RX DMA engine will split each frame at - * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to - * be so large that that never happens. */ - const unsigned huge_buf_size = (3 * 4096) >> 5; /* RX control FIFO thresholds (32 entries) */ const unsigned ctrl_xon_thr = 20; const unsigned ctrl_xoff_thr = 25; @@@ -1557,10 -1553,15 +1553,15 @@@
efx_reado(efx, ®, FR_AZ_RX_CFG); if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { - /* Data FIFO size is 5.5K */ + /* Data FIFO size is 5.5K. The RX DMA engine only + * supports scattering for user-mode queues, but will + * split DMA writes at intervals of RX_USR_BUF_SIZE + * (32-byte units) even for kernel-mode queues. We + * set it to be so large that that never happens. + */ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, - huge_buf_size); + (3 * 4096) >> 5); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); @@@ -1569,7 -1570,7 +1570,7 @@@ /* Data FIFO size is 80K; register fields moved */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, - huge_buf_size); + EFX_RX_USR_BUF_SIZE >> 5); /* Send XON and XOFF at ~3 * max MTU away from empty/full */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); @@@ -1665,11 -1666,13 +1666,11 @@@ static void falcon_remove_nic(struct ef { struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_board *board = falcon_board(efx); - int rc;
board->type->fini(efx);
/* Remove I2C adapter and clear it in preparation for a retry */ - rc = i2c_del_adapter(&board->i2c_adap); - BUG_ON(rc); + i2c_del_adapter(&board->i2c_adap); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
efx_nic_free_buffer(efx, &efx->irq_status); @@@ -1813,6 -1816,7 +1814,7 @@@ const struct efx_nic_type falcon_a1_nic .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_padding = 0x24, + .can_rx_scatter = false, .max_interrupt_mode = EFX_INT_MODE_MSI, .phys_addr_channels = 4, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, @@@ -1863,6 -1867,7 +1865,7 @@@ const struct efx_nic_type falcon_b0_nic .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, + .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 diff --combined drivers/net/ethernet/ti/cpsw.c index 4781d3d,1d74042..b113634 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@@ -126,6 -126,13 +126,13 @@@ do { #define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+ #define CPSW_INTPACEEN (0x3f << 16) + #define CPSW_INTPRESCALE_MASK (0x7FF << 0) + #define CPSW_CMINTMAX_CNT 63 + #define CPSW_CMINTMIN_CNT 2 + #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) + #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) + #define cpsw_enable_irq(priv) \ do { \ u32 i; \ @@@ -139,6 -146,10 +146,10 @@@ disable_irq_nosync(priv->irqs_table[i]); \ } while (0);
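The pacer counts interrupts per millisecond, so the interval limits follow directly from the count limits; a quick standalone check of the constants above (the coalesce code that consumes them appears further down in this diff):

    #include <assert.h>

    #define CPSW_CMINTMAX_CNT       63
    #define CPSW_CMINTMIN_CNT       2
    #define CPSW_CMINTMAX_INTVL     (1000 / CPSW_CMINTMIN_CNT)
    #define CPSW_CMINTMIN_INTVL     ((1000 / CPSW_CMINTMAX_CNT) + 1)

    int main(void)
    {
            /* at least 2 interrupts per ms -> longest basic interval is 500 us */
            assert(CPSW_CMINTMAX_INTVL == 500);
            /* at most 63 interrupts per ms -> shortest interval rounds up to 16 us */
            assert(CPSW_CMINTMIN_INTVL == 16);
            return 0;
    }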
+ #define cpsw_slave_index(priv) \ + ((priv->data.dual_emac) ? priv->emac_port : \ + priv->data.active_slave) + static int debug_level; module_param(debug_level, int, 0); MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); @@@ -160,6 -171,15 +171,15 @@@ struct cpsw_wr_regs u32 rx_en; u32 tx_en; u32 misc_en; + u32 mem_allign1[8]; + u32 rx_thresh_stat; + u32 rx_stat; + u32 tx_stat; + u32 misc_stat; + u32 mem_allign2[8]; + u32 rx_imax; + u32 tx_imax; + };
struct cpsw_ss_regs { @@@ -314,6 -334,8 +334,8 @@@ struct cpsw_priv struct cpsw_host_regs __iomem *host_port_regs; u32 msg_enable; u32 version; + u32 coal_intvl; + u32 bus_freq_mhz; struct net_device_stats stats; int rx_packet_max; int host_port; @@@ -612,6 -634,77 +634,77 @@@ static void cpsw_adjust_link(struct net } }
+ static int cpsw_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) + { + struct cpsw_priv *priv = netdev_priv(ndev); + + coal->rx_coalesce_usecs = priv->coal_intvl; + return 0; + } + + static int cpsw_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) + { + struct cpsw_priv *priv = netdev_priv(ndev); + u32 int_ctrl; + u32 num_interrupts = 0; + u32 prescale = 0; + u32 addnl_dvdr = 1; + u32 coal_intvl = 0; + + if (!coal->rx_coalesce_usecs) + return -EINVAL; + + coal_intvl = coal->rx_coalesce_usecs; + + int_ctrl = readl(&priv->wr_regs->int_control); + prescale = priv->bus_freq_mhz * 4; + + if (coal_intvl < CPSW_CMINTMIN_INTVL) + coal_intvl = CPSW_CMINTMIN_INTVL; + + if (coal_intvl > CPSW_CMINTMAX_INTVL) { + /* Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. + */ + addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) + coal_intvl = (CPSW_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = CPSW_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + writel(num_interrupts, &priv->wr_regs->rx_imax); + writel(num_interrupts, &priv->wr_regs->tx_imax); + + int_ctrl |= CPSW_INTPACEEN; + int_ctrl &= (~CPSW_INTPRESCALE_MASK); + int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); + writel(int_ctrl, &priv->wr_regs->int_control); + + cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + priv = netdev_priv(priv->slaves[i].ndev); + priv->coal_intvl = coal_intvl; + } + } else { + priv->coal_intvl = coal_intvl; + } + + return 0; + } + static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) { static char *leader = "........................................"; @@@ -834,6 -927,14 +927,14 @@@ static int cpsw_ndo_open(struct net_dev cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); }
+ /* Enable Interrupt pacing if configured */ + if (priv->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = (priv->coal_intvl << 4); + cpsw_set_coalesce(ndev, &coal); + } + cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); napi_enable(&priv->napi); @@@ -942,7 -1043,7 +1043,7 @@@ static void cpsw_ndo_change_rx_flags(st
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; + struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; u32 ts_en, seq_id;
if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { @@@ -971,7 -1072,7 +1072,7 @@@ static void cpsw_hwtstamp_v2(struct cps if (priv->data.dual_emac) slave = &priv->slaves[priv->emac_port]; else - slave = &priv->slaves[priv->data.cpts_active_slave]; + slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL); ctrl &= ~CTRL_ALL_TS_MASK; @@@ -1056,14 -1157,26 +1157,26 @@@ static int cpsw_hwtstamp_ioctl(struct n
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { + struct cpsw_priv *priv = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(req); + int slave_no = cpsw_slave_index(priv); + if (!netif_running(dev)) return -EINVAL;
+ switch (cmd) { #ifdef CONFIG_TI_CPTS - if (cmd == SIOCSHWTSTAMP) + case SIOCSHWTSTAMP: return cpsw_hwtstamp_ioctl(dev, req); #endif - return -ENOTSUPP; + case SIOCGMIIPHY: + data->phy_id = priv->slaves[slave_no].phy->addr; + break; + default: + return -ENOTSUPP; + } + + return 0; }
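With the ioctl dispatch reworked into a switch, SIOCGMIIPHY now reports the PHY address of the slave chosen by cpsw_slave_index(). A minimal userspace sketch of the classic MII query; the interface name is an assumption:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/mii.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ifreq ifr;
            /* the MII data overlays the ifreq union, as if_mii() does in-kernel */
            struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* example name */
            if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0)
                    printf("PHY address: %u\n", mii->phy_id);
            close(fd);
            return 0;
    }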
static void cpsw_ndo_tx_timeout(struct net_device *ndev) @@@ -1244,12 -1357,39 +1357,39 @@@ static int cpsw_get_ts_info(struct net_ return 0; }
+ static int cpsw_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) + { + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; + } + + static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) + { + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; + } + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, .set_msglevel = cpsw_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = cpsw_get_ts_info, + .get_settings = cpsw_get_settings, + .set_settings = cpsw_set_settings, + .get_coalesce = cpsw_get_coalesce, + .set_coalesce = cpsw_set_coalesce, };
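Together with .get_coalesce/.set_coalesce, these hooks surface the pacer through ethtool, e.g. "ethtool -C eth0 rx-usecs 250". To make the register arithmetic concrete, here is the same computation as a standalone sketch, a minimal mirror of cpsw_set_coalesce() above; the 250 MHz bus clock is just an example value, and in the real driver prescale is also programmed into int_control:

    #include <stdio.h>

    #define CPSW_INTPRESCALE_MASK   (0x7FF << 0)
    #define CPSW_CMINTMAX_CNT       63
    #define CPSW_CMINTMIN_CNT       2
    #define CPSW_CMINTMAX_INTVL     (1000 / CPSW_CMINTMIN_CNT)
    #define CPSW_CMINTMIN_INTVL     ((1000 / CPSW_CMINTMAX_CNT) + 1)

    /* Mirror of the cpsw_set_coalesce() arithmetic: returns the effective
     * interval in usecs and fills *imax with the interrupts-per-ms count
     * written to the rx_imax/tx_imax registers. */
    static unsigned int cpsw_pace(unsigned int usecs, unsigned int bus_mhz,
                                  unsigned int *imax)
    {
            unsigned int prescale = bus_mhz * 4;    /* counts per 4 us pulse */
            unsigned int addnl_dvdr = 1;

            if (usecs < CPSW_CMINTMIN_INTVL)
                    usecs = CPSW_CMINTMIN_INTVL;
            if (usecs > CPSW_CMINTMAX_INTVL) {
                    /* dilate the 4 us pulse to reach longer intervals */
                    addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
                    if (addnl_dvdr > 1) {
                            prescale *= addnl_dvdr;
                            if (usecs > CPSW_CMINTMAX_INTVL * addnl_dvdr)
                                    usecs = CPSW_CMINTMAX_INTVL * addnl_dvdr;
                    } else {
                            addnl_dvdr = 1;
                            usecs = CPSW_CMINTMAX_INTVL;
                    }
            }
            *imax = (1000 * addnl_dvdr) / usecs;
            return usecs;
    }

    int main(void)
    {
            unsigned int imax;
            unsigned int eff = cpsw_pace(250, 250, &imax);

            printf("effective %u us, imax %u\n", eff, imax); /* 250 us, 4 */
            return 0;
    }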
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@@ -1282,12 -1422,12 +1422,12 @@@ static int cpsw_probe_dt(struct cpsw_pl } data->slaves = prop;
- if (of_property_read_u32(node, "cpts_active_slave", &prop)) { - pr_err("Missing cpts_active_slave property in the DT.\n"); + if (of_property_read_u32(node, "active_slave", &prop)) { + pr_err("Missing active_slave property in the DT.\n"); ret = -EINVAL; goto error_ret; } - data->cpts_active_slave = prop; + data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { pr_err("Missing cpts_clock_mult property in the DT.\n"); @@@ -1380,7 -1520,7 +1520,7 @@@ memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
if (data->dual_emac) { - if (of_property_read_u32(node, "dual_emac_res_vlan", + if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { pr_err("Missing dual_emac_res_vlan in DT.\n"); slave_data->dual_emac_res_vlan = i+1; @@@ -1437,6 -1577,9 +1577,9 @@@ static int cpsw_probe_dual_emac(struct priv_sl2->slaves = priv->slaves; priv_sl2->clk = priv->clk;
+ priv_sl2->coal_intvl = 0; + priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; + priv_sl2->cpsw_res = priv->cpsw_res; priv_sl2->regs = priv->regs; priv_sl2->host_port = priv->host_port; @@@ -1546,6 -1689,8 +1689,8 @@@ static int cpsw_probe(struct platform_d ret = -ENODEV; goto clean_slave_ret; } + priv->coal_intvl = 0; + priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!priv->cpsw_res) { diff --combined drivers/net/tun.c index 729ed53,316c759..66109a2 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -409,14 -409,12 +409,12 @@@ static void __tun_detach(struct tun_fil { struct tun_file *ntfile; struct tun_struct *tun; - struct net_device *dev;
tun = rtnl_dereference(tfile->tun);
if (tun && !tfile->detached) { u16 index = tfile->queue_index; BUG_ON(index >= tun->numqueues); - dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index], tun->tfiles[tun->numqueues - 1]); @@@ -1205,6 -1203,8 +1203,8 @@@ static ssize_t tun_get_user(struct tun_ }
skb_reset_network_header(skb); + skb_probe_transport_header(skb, 0); + rxhash = skb_get_rxhash(skb); netif_rx_ni(skb);
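The new skb_probe_transport_header(skb, 0) call dissects the just-built packet so that skb->transport_header is valid before the frame enters the stack (the second argument is the fallback offset hint used when flow dissection fails); this primes later GSO and checksum paths for packets injected through the tap.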
@@@ -1594,7 -1594,7 +1594,7 @@@ static int tun_set_iff(struct net *net
if (tun->flags & TUN_TAP_MQ && (tun->numqueues + tun->numdisabled > 1)) - return err; + return -EBUSY; } else { char *name; @@@ -1656,6 -1656,7 +1656,7 @@@ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES; dev->features = dev->hw_features; + dev->vlan_features = dev->features;
INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, file); diff --combined drivers/net/wireless/ath/wil6210/debugfs.c index 76c7694,4be07f5..727b1f5 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@@ -216,7 -216,7 +216,7 @@@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32 wil_debugfs_iomem_x32_set, "0x%08llx\n");
static struct dentry *wil_debugfs_create_iomem_x32(const char *name, - mode_t mode, + umode_t mode, struct dentry *parent, void __iomem *value) { @@@ -312,14 -312,6 +312,6 @@@ static const struct file_operations fop .llseek = seq_lseek, };
- static int wil_default_open(struct inode *inode, struct file *file) - { - if (inode->i_private) - file->private_data = inode->i_private; - - return 0; - } - static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@@ -361,13 -353,13 +353,13 @@@
static const struct file_operations fops_ioblob = { .read = wil_read_file_ioblob, - .open = wil_default_open, + .open = simple_open, .llseek = default_llseek, };
static struct dentry *wil_debugfs_create_ioblob(const char *name, - mode_t mode, + umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob) { @@@ -396,7 -388,7 +388,7 @@@ static ssize_t wil_write_file_reset(str
static const struct file_operations fops_reset = { .write = wil_write_file_reset, - .open = wil_default_open, + .open = simple_open, }; /*---------Tx descriptor------------*/
@@@ -526,7 -518,50 +518,50 @@@ static ssize_t wil_write_file_ssid(stru static const struct file_operations fops_ssid = { .read = wil_read_file_ssid, .write = wil_write_file_ssid, - .open = wil_default_open, + .open = simple_open, + }; + + /*---------temp------------*/ + static void print_temp(struct seq_file *s, const char *prefix, u32 t) + { + switch (t) { + case 0: + case ~(u32)0: + seq_printf(s, "%s N/A\n", prefix); + break; + default: + seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000); + break; + } + } + + static int wil_temp_debugfs_show(struct seq_file *s, void *data) + { + struct wil6210_priv *wil = s->private; + u32 t_m, t_r; + + int rc = wmi_get_temperature(wil, &t_m, &t_r); + if (rc) { + seq_printf(s, "Failed\n"); + return 0; + } + + print_temp(s, "MAC temperature :", t_m); + print_temp(s, "Radio temperature :", t_r); + + return 0; + } + + static int wil_temp_seq_open(struct inode *inode, struct file *file) + { + return single_open(file, wil_temp_debugfs_show, inode->i_private); + } + + static const struct file_operations fops_temp = { + .open = wil_temp_seq_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, };
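Two notes on this wil6210 hunk. First, print_temp() treats 0 and ~0 as "not available" sentinels from the firmware rather than real readings. Second, the switch from the driver-private wil_default_open() (removed above) to simple_open() is behaviour-neutral: the generic fs/libfs.c helper does essentially the same thing. For reference, a sketch of what that helper looks like in this era (illustrative, not copied from the tree):

    int simple_open(struct inode *inode, struct file *file)
    {
            if (inode->i_private)
                    file->private_data = inode->i_private;
            return 0;
    }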
/*----------------*/ @@@ -563,6 -598,7 +598,7 @@@ int wil6210_debugfs_init(struct wil6210 debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset); + debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp);
wil->rgf_blob.data = (void * __force)wil->csr + 0; wil->rgf_blob.size = 0xa000; diff --combined drivers/net/wireless/b43/phy_n.c index b70f220,f9339e7..63cca9c --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@@ -2789,10 -2789,6 +2789,6 @@@ static void b43_nphy_iq_cal_gain_params * Tx and Rx **************************************************/
- void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) - {//TODO - } - static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) {//TODO } @@@ -4892,7 -4888,7 +4888,7 @@@ static void b43_nphy_superswitch_init(s }
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */ - int b43_phy_initn(struct b43_wldev *dev) + static int b43_phy_initn(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; struct b43_phy *phy = &dev->phy; @@@ -5165,8 -5161,7 +5161,8 @@@ static void b43_nphy_pmu_spur_avoid(str #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: - /* FIXME */ + ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, + avoid); break; #endif } diff --combined drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 78da3ef,6269920..e4f1f3c --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@@ -26,6 -26,7 +26,7 @@@ #include <brcmu_wifi.h> #include "dhd.h" #include "dhd_dbg.h" + #include "tracepoint.h" #include "fwil_types.h" #include "p2p.h" #include "wl_cfg80211.h" @@@ -182,64 -183,6 +183,6 @@@ static struct ieee80211_channel __wl_5g CHAN5G(216, 0), };
- static struct ieee80211_channel __wl_5ghz_n_channels[] = { - CHAN5G(32, 0), CHAN5G(34, 0), - CHAN5G(36, 0), CHAN5G(38, 0), - CHAN5G(40, 0), CHAN5G(42, 0), - CHAN5G(44, 0), CHAN5G(46, 0), - CHAN5G(48, 0), CHAN5G(50, 0), - CHAN5G(52, 0), CHAN5G(54, 0), - CHAN5G(56, 0), CHAN5G(58, 0), - CHAN5G(60, 0), CHAN5G(62, 0), - CHAN5G(64, 0), CHAN5G(66, 0), - CHAN5G(68, 0), CHAN5G(70, 0), - CHAN5G(72, 0), CHAN5G(74, 0), - CHAN5G(76, 0), CHAN5G(78, 0), - CHAN5G(80, 0), CHAN5G(82, 0), - CHAN5G(84, 0), CHAN5G(86, 0), - CHAN5G(88, 0), CHAN5G(90, 0), - CHAN5G(92, 0), CHAN5G(94, 0), - CHAN5G(96, 0), CHAN5G(98, 0), - CHAN5G(100, 0), CHAN5G(102, 0), - CHAN5G(104, 0), CHAN5G(106, 0), - CHAN5G(108, 0), CHAN5G(110, 0), - CHAN5G(112, 0), CHAN5G(114, 0), - CHAN5G(116, 0), CHAN5G(118, 0), - CHAN5G(120, 0), CHAN5G(122, 0), - CHAN5G(124, 0), CHAN5G(126, 0), - CHAN5G(128, 0), CHAN5G(130, 0), - CHAN5G(132, 0), CHAN5G(134, 0), - CHAN5G(136, 0), CHAN5G(138, 0), - CHAN5G(140, 0), CHAN5G(142, 0), - CHAN5G(144, 0), CHAN5G(145, 0), - CHAN5G(146, 0), CHAN5G(147, 0), - CHAN5G(148, 0), CHAN5G(149, 0), - CHAN5G(150, 0), CHAN5G(151, 0), - CHAN5G(152, 0), CHAN5G(153, 0), - CHAN5G(154, 0), CHAN5G(155, 0), - CHAN5G(156, 0), CHAN5G(157, 0), - CHAN5G(158, 0), CHAN5G(159, 0), - CHAN5G(160, 0), CHAN5G(161, 0), - CHAN5G(162, 0), CHAN5G(163, 0), - CHAN5G(164, 0), CHAN5G(165, 0), - CHAN5G(166, 0), CHAN5G(168, 0), - CHAN5G(170, 0), CHAN5G(172, 0), - CHAN5G(174, 0), CHAN5G(176, 0), - CHAN5G(178, 0), CHAN5G(180, 0), - CHAN5G(182, 0), CHAN5G(184, 0), - CHAN5G(186, 0), CHAN5G(188, 0), - CHAN5G(190, 0), CHAN5G(192, 0), - CHAN5G(194, 0), CHAN5G(196, 0), - CHAN5G(198, 0), CHAN5G(200, 0), - CHAN5G(202, 0), CHAN5G(204, 0), - CHAN5G(206, 0), CHAN5G(208, 0), - CHAN5G(210, 0), CHAN5G(212, 0), - CHAN5G(214, 0), CHAN5G(216, 0), - CHAN5G(218, 0), CHAN5G(220, 0), - CHAN5G(222, 0), CHAN5G(224, 0), - CHAN5G(226, 0), CHAN5G(228, 0), - }; - static struct ieee80211_supported_band __wl_band_2ghz = { .band = IEEE80211_BAND_2GHZ, .channels = __wl_2ghz_channels, @@@ -256,12 -199,28 +199,28 @@@ static struct ieee80211_supported_band .n_bitrates = wl_a_rates_size, };
- static struct ieee80211_supported_band __wl_band_5ghz_n = { - .band = IEEE80211_BAND_5GHZ, - .channels = __wl_5ghz_n_channels, - .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels), - .bitrates = wl_a_rates, - .n_bitrates = wl_a_rates_size, + /* This is to override regulatory domains defined in cfg80211 module (reg.c) + * By default world regulatory domain defined in reg.c puts the flags + * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (for + * 36..48 and 149..165). With respect to these flags, wpa_supplicant doesn't + * start p2p operations on 5GHz channels. All the changes in world regulatory + * domain are to be done here. + */ + static const struct ieee80211_regdomain brcmf_regdom = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), + /* If any */ + /* IEEE 802.11 channel 14 - Only JP enables + * this and for 802.11b only + */ + REG_RULE(2484-10, 2484+10, 20, 6, 20, 0), + /* IEEE 802.11a, channel 36..64 */ + REG_RULE(5150-10, 5350+10, 40, 6, 20, 0), + /* IEEE 802.11a, channel 100..165 */ + REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } };
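A REG_RULE spans frequency edges rather than channel centres, which is why each bound is written as centre ± 10 MHz (half of a 20 MHz channel). A quick numeric check against the usual 5 GHz mapping, centre frequency = 5000 + 5 * channel, in MHz:

    #include <assert.h>

    static int chan5g_mhz(int chan)
    {
            return 5000 + 5 * chan; /* 5 GHz channel centre frequency */
    }

    int main(void)
    {
            /* REG_RULE(5150-10, 5350+10, ...) covers channels 36..64 */
            assert(chan5g_mhz(36) - 10 >= 5150 - 10);
            assert(chan5g_mhz(64) + 10 <= 5350 + 10);
            /* REG_RULE(5470-10, 5850+10, ...) covers channels 100..165 */
            assert(chan5g_mhz(100) - 10 >= 5470 - 10);
            assert(chan5g_mhz(165) + 10 <= 5850 + 10);
            return 0;
    }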
static const u32 __wl_cipher_suites[] = { @@@ -523,17 -482,16 +482,16 @@@ static struct wireless_dev *brcmf_cfg80 return ERR_PTR(-EOPNOTSUPP); case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_P2P_DEVICE: return brcmf_p2p_add_vif(wiphy, name, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - case NL80211_IFTYPE_P2P_DEVICE: default: return ERR_PTR(-EINVAL); } }
- void brcmf_set_mpc(struct net_device *ndev, int mpc) + void brcmf_set_mpc(struct brcmf_if *ifp, int mpc) { - struct brcmf_if *ifp = netdev_priv(ndev); s32 err = 0;
if (check_vif_up(ifp->vif)) { @@@ -546,10 -504,9 +504,9 @@@ } }
- s32 - brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, - struct net_device *ndev, - bool aborted, bool fw_abort) + s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, + struct brcmf_if *ifp, bool aborted, + bool fw_abort) { struct brcmf_scan_params_le params_le; struct cfg80211_scan_request *scan_request; @@@ -580,7 -537,7 +537,7 @@@ /* Scan is aborted by setting channel_list[0] to -1 */ params_le.channel_list[0] = cpu_to_le16(-1); /* E-Scan (or anyother type) can be aborted by SCAN */ - err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN, + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, ¶ms_le, sizeof(params_le)); if (err) brcmf_err("Scan abort failed\n"); @@@ -594,12 -551,12 +551,12 @@@ cfg->sched_escan = false; if (!aborted) cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); - brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(ifp, 1); } else if (scan_request) { brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", aborted ? "Aborted" : "Done"); cfg80211_scan_done(scan_request, aborted); - brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(ifp, 1); } if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); @@@ -619,9 -576,9 +576,9 @@@ int brcmf_cfg80211_del_iface(struct wip
if (ndev) { if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) && - cfg->escan_info.ndev == ndev) - brcmf_notify_escan_complete(cfg, ndev, true, - true); + cfg->escan_info.ifp == netdev_priv(ndev)) + brcmf_notify_escan_complete(cfg, netdev_priv(ndev), + true, true);
brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1); } @@@ -637,9 -594,9 +594,9 @@@ return -EOPNOTSUPP; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_P2P_DEVICE: return brcmf_p2p_del_vif(wiphy, wdev); case NL80211_IFTYPE_UNSPECIFIED: - case NL80211_IFTYPE_P2P_DEVICE: default: return -EINVAL; } @@@ -803,7 -760,7 +760,7 @@@ static void brcmf_escan_prep(struct brc }
static s32 - brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, + brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, struct cfg80211_scan_request *request, u16 action) { s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + @@@ -832,8 -789,7 +789,7 @@@ params->action = cpu_to_le16(action); params->sync_id = cpu_to_le16(0x1234);
- err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan", - params, params_size); + err = brcmf_fil_iovar_data_set(ifp, "escan", params, params_size); if (err) { if (err == -EBUSY) brcmf_dbg(INFO, "system busy : escan canceled\n"); @@@ -848,7 -804,7 +804,7 @@@ exit
static s32 brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy, - struct net_device *ndev, struct cfg80211_scan_request *request) + struct brcmf_if *ifp, struct cfg80211_scan_request *request) { s32 err; u32 passive_scan; @@@ -856,35 -812,35 +812,35 @@@ struct escan_info *escan = &cfg->escan_info;
brcmf_dbg(SCAN, "Enter\n"); - escan->ndev = ndev; + escan->ifp = ifp; escan->wiphy = wiphy; escan->escan_state = WL_ESCAN_STATE_SCANNING; passive_scan = cfg->active_scan ? 0 : 1; - err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN, + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN, passive_scan); if (err) { brcmf_err("error (%d)\n", err); return err; } - brcmf_set_mpc(ndev, 0); + brcmf_set_mpc(ifp, 0); results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf; results->version = 0; results->count = 0; results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
- err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START); + err = escan->run(cfg, ifp, request, WL_ESCAN_ACTION_START); if (err) - brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(ifp, 1); return err; }
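The thread running through these hunks: the escan machinery is rekeyed from struct net_device to struct brcmf_if/brcmf_cfg80211_vif. That matters for the P2P_DEVICE support added elsewhere in this diff, where a vif owns a wdev but not necessarily a netdev (note the new "if (ifp->ndev)" guard in brcmf_notify_vif_event further down).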
static s32 - brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev, + brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid) { - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); + struct brcmf_if *ifp = vif->ifp; + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct cfg80211_ssid *ssids; struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int; u32 passive_scan; @@@ -910,10 -866,8 +866,8 @@@ }
/* If scan req comes for p2p0, send it over primary I/F */ - if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) { - ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - ndev = ifp->ndev; - } + if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) + vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
/* Arm scan timeout timer */ mod_timer(&cfg->escan_timeout, jiffies + @@@ -934,11 -888,11 +888,11 @@@ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); if (escan_req) { cfg->escan_info.run = brcmf_run_escan; - err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif); + err = brcmf_p2p_scan_prep(wiphy, request, vif); if (err) goto scan_out;
- err = brcmf_do_escan(cfg, wiphy, ndev, request); + err = brcmf_do_escan(cfg, wiphy, vif->ifp, request); if (err) goto scan_out; } else { @@@ -962,7 -916,7 +916,7 @@@ brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err); goto scan_out; } - brcmf_set_mpc(ndev, 0); + brcmf_set_mpc(ifp, 0); err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, &sr->ssid_le, sizeof(sr->ssid_le)); if (err) { @@@ -972,7 -926,7 +926,7 @@@ else brcmf_err("WLC_SCAN error (%d)\n", err);
- brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(ifp, 1); goto scan_out; } } @@@ -990,16 -944,15 +944,15 @@@ scan_out static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { - struct net_device *ndev = request->wdev->netdev; + struct brcmf_cfg80211_vif *vif; s32 err = 0;
brcmf_dbg(TRACE, "Enter\n"); - - if (!check_vif_up(container_of(request->wdev, - struct brcmf_cfg80211_vif, wdev))) + vif = container_of(request->wdev, struct brcmf_cfg80211_vif, wdev); + if (!check_vif_up(vif)) return -EIO;
- err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL); + err = brcmf_cfg80211_escan(wiphy, vif, request, NULL);
if (err) brcmf_err("scan error (%d)\n", err); @@@ -2510,7 -2463,7 +2463,7 @@@ void brcmf_abort_scanning(struct brcmf_ set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status); if (cfg->scan_request) { escan->escan_state = WL_ESCAN_STATE_IDLE; - brcmf_notify_escan_complete(cfg, escan->ndev, true, true); + brcmf_notify_escan_complete(cfg, escan->ifp, true, true); } clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status); @@@ -2522,7 -2475,7 +2475,7 @@@ static void brcmf_cfg80211_escan_timeou container_of(work, struct brcmf_cfg80211_info, escan_timeout_work);
- brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true); }
static void brcmf_escan_timeout(unsigned long data) @@@ -2573,7 -2526,6 +2526,6 @@@ brcmf_cfg80211_escan_handler(struct brc const struct brcmf_event_msg *e, void *data) { struct brcmf_cfg80211_info *cfg = ifp->drvr->config; - struct net_device *ndev = ifp->ndev; s32 status; s32 err = 0; struct brcmf_escan_result_le *escan_result_le; @@@ -2586,9 -2538,8 +2538,8 @@@
status = e->status;
- if (!ndev || !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { - brcmf_err("scan not ready ndev %p drv_status %x\n", ndev, - !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)); + if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { + brcmf_err("scan not ready, bssidx=%d\n", ifp->bssidx); return -EPERM; }
@@@ -2659,7 -2610,7 +2610,7 @@@ cfg->escan_info.escan_buf; brcmf_inform_bss(cfg); aborted = status != BRCMF_E_STATUS_SUCCESS; - brcmf_notify_escan_complete(cfg, ndev, aborted, + brcmf_notify_escan_complete(cfg, ifp, aborted, false); } else brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n", @@@ -2737,7 -2688,7 +2688,7 @@@ static s32 brcmf_cfg80211_suspend(struc brcmf_abort_scanning(cfg);
/* Turn off watchdog timer */ - brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(netdev_priv(ndev), 1);
exit: brcmf_dbg(TRACE, "Exit\n"); @@@ -2895,7 -2846,6 +2846,6 @@@ brcmf_notify_sched_scan_results(struct const struct brcmf_event_msg *e, void *data) { struct brcmf_cfg80211_info *cfg = ifp->drvr->config; - struct net_device *ndev = ifp->ndev; struct brcmf_pno_net_info_le *netinfo, *netinfo_start; struct cfg80211_scan_request *request = NULL; struct cfg80211_ssid *ssid = NULL; @@@ -2979,7 -2929,7 +2929,7 @@@ }
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); - err = brcmf_do_escan(cfg, wiphy, ndev, request); + err = brcmf_do_escan(cfg, wiphy, ifp, request); if (err) { clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); goto out_err; @@@ -3051,16 -3001,16 +3001,16 @@@ brcmf_cfg80211_sched_scan_start(struct int i; int ret = 0;
- brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n", + brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n", request->n_match_sets, request->n_ssids); if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status); return -EAGAIN; }
- if (!request || !request->n_ssids || !request->n_match_sets) { + if (!request->n_ssids || !request->n_match_sets) { brcmf_err("Invalid sched scan req!! n_ssids:%d\n", - request ? request->n_ssids : 0); + request->n_ssids); return -EINVAL; }
@@@ -3136,7 -3086,7 +3086,7 @@@ static int brcmf_cfg80211_sched_scan_st brcmf_dbg(SCAN, "enter\n"); brcmf_dev_pno_clean(ndev); if (cfg->sched_escan) - brcmf_notify_escan_complete(cfg, ndev, true, true); + brcmf_notify_escan_complete(cfg, netdev_priv(ndev), true, true); return 0; }
@@@ -3708,7 -3658,7 +3658,7 @@@ brcmf_cfg80211_start_ap(struct wiphy *w ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len); }
- brcmf_set_mpc(ndev, 0); + brcmf_set_mpc(ifp, 0);
/* find the RSN_IE */ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, @@@ -3816,7 -3766,7 +3766,7 @@@
exit: if (err) - brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(ifp, 1); return err; }
@@@ -3856,7 -3806,7 +3806,7 @@@ static int brcmf_cfg80211_stop_ap(struc if (err < 0) brcmf_err("bss_enable config failed %d\n", err); } - brcmf_set_mpc(ndev, 1); + brcmf_set_mpc(ifp, 1); set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
@@@ -3913,13 -3863,13 +3863,13 @@@ brcmf_cfg80211_mgmt_frame_register(stru struct wireless_dev *wdev, u16 frame_type, bool reg) { - struct brcmf_if *ifp = netdev_priv(wdev->netdev); - struct brcmf_cfg80211_vif *vif = ifp->vif; + struct brcmf_cfg80211_vif *vif; u16 mgmt_type;
brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); if (reg) vif->mgmt_rx_reg |= BIT(mgmt_type); else @@@ -3935,7 -3885,6 +3885,6 @@@ brcmf_cfg80211_mgmt_tx(struct wiphy *wi { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); const struct ieee80211_mgmt *mgmt; - struct brcmf_if *ifp; struct brcmf_cfg80211_vif *vif; s32 err = 0; s32 ie_offset; @@@ -3971,8 -3920,7 +3920,7 @@@ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; ie_len = len - ie_offset; - ifp = netdev_priv(wdev->netdev); - vif = ifp->vif; + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; err = brcmf_vif_set_mgmt_ie(vif, @@@ -4007,7 -3955,7 +3955,7 @@@ *cookie, le16_to_cpu(action_frame->len), chan->center_freq);
- ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev, + ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), af_params);
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, @@@ -4079,6 -4027,8 +4027,8 @@@ static struct cfg80211_ops wl_cfg80211_ .mgmt_tx = brcmf_cfg80211_mgmt_tx, .remain_on_channel = brcmf_p2p_remain_on_channel, .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, + .start_p2p_device = brcmf_p2p_start_device, + .stop_p2p_device = brcmf_p2p_stop_device, #ifdef CONFIG_NL80211_TESTMODE .testmode_cmd = brcmf_cfg80211_testmode #endif @@@ -4128,6 -4078,10 +4078,6 @@@ static const struct ieee80211_iface_lim }, { .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE) - }, - { - .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, @@@ -4162,6 -4116,11 +4112,11 @@@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) } };
@@@ -4183,17 -4142,11 +4138,10 @@@ static struct wiphy *brcmf_setup_wiphy( BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_DEVICE); + BIT(NL80211_IFTYPE_P2P_GO); wiphy->iface_combinations = brcmf_iface_combos; wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; - wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set - * it as 11a by default. - * This will be updated with - * 11n phy tables in - * "ifconfig up" - * if phy has 11n capability - */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->cipher_suites = __wl_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); @@@ -4203,6 -4156,9 +4151,9 @@@ wiphy->mgmt_stypes = brcmf_txrx_stypes; wiphy->max_remain_on_channel_duration = 5000; brcmf_wiphy_pno_params(wiphy); + brcmf_dbg(INFO, "Registering custom regulatory\n"); + wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom); err = wiphy_register(wiphy); if (err < 0) { brcmf_err("Could not register wiphy device (%d)\n", err); @@@ -4621,9 -4577,11 +4572,11 @@@ static s32 brcmf_notify_vif_event(struc
ifp->vif = vif; vif->ifp = ifp; - vif->wdev.netdev = ifp->ndev; - ifp->ndev->ieee80211_ptr = &vif->wdev; - SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); + if (ifp->ndev) { + vif->wdev.netdev = ifp->ndev; + ifp->ndev->ieee80211_ptr = &vif->wdev; + SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); + } mutex_unlock(&event->vif_event_lock); wake_up(&event->vif_wq); return 0; @@@ -4926,34 -4884,248 +4879,248 @@@ dongle_scantime_out return err; }
- static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg) + + static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap) + { + struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct ieee80211_channel *band_chan_arr; + struct brcmf_chanspec_list *list; + s32 err; + u8 *pbuf; + u32 i, j; + u32 total; + u16 chanspec; + enum ieee80211_band band; + u32 channel; + u32 *n_cnt; + bool ht40_allowed; + u32 index; + u32 ht40_flag; + bool update; + u32 array_size; + + pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); + + if (pbuf == NULL) + return -ENOMEM; + + list = (struct brcmf_chanspec_list *)pbuf; + + err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf, + BRCMF_DCMD_MEDLEN); + if (err) { + brcmf_err("get chanspecs error (%d)\n", err); + goto exit; + } + + __wl_band_2ghz.n_channels = 0; + __wl_band_5ghz_a.n_channels = 0; + + total = le32_to_cpu(list->count); + for (i = 0; i < total; i++) { + chanspec = (u16)le32_to_cpu(list->element[i]); + channel = CHSPEC_CHANNEL(chanspec); + + if (CHSPEC_IS40(chanspec)) { + if (CHSPEC_SB_UPPER(chanspec)) + channel += CH_10MHZ_APART; + else + channel -= CH_10MHZ_APART; + } else if (CHSPEC_IS80(chanspec)) { + brcmf_dbg(INFO, "HT80 center channel : %d\n", + channel); + continue; + } + if (CHSPEC_IS2G(chanspec) && (channel >= CH_MIN_2G_CHANNEL) && + (channel <= CH_MAX_2G_CHANNEL)) { + band_chan_arr = __wl_2ghz_channels; + array_size = ARRAY_SIZE(__wl_2ghz_channels); + n_cnt = &__wl_band_2ghz.n_channels; + band = IEEE80211_BAND_2GHZ; + ht40_allowed = (bw_cap == WLC_N_BW_40ALL); + } else if (CHSPEC_IS5G(chanspec) && + channel >= CH_MIN_5G_CHANNEL) { + band_chan_arr = __wl_5ghz_a_channels; + array_size = ARRAY_SIZE(__wl_5ghz_a_channels); + n_cnt = &__wl_band_5ghz_a.n_channels; + band = IEEE80211_BAND_5GHZ; + ht40_allowed = !(bw_cap == WLC_N_BW_20ALL); + } else { + brcmf_err("Invalid channel Sepc. 
0x%x.\n", chanspec); + continue; + } + if (!ht40_allowed && CHSPEC_IS40(chanspec)) + continue; + update = false; + for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { + if (band_chan_arr[j].hw_value == channel) { + update = true; + break; + } + } + if (update) + index = j; + else + index = *n_cnt; + if (index < array_size) { + band_chan_arr[index].center_freq = + ieee80211_channel_to_frequency(channel, band); + band_chan_arr[index].hw_value = channel; + + if (CHSPEC_IS40(chanspec) && ht40_allowed) { + /* assuming the order is HT20, HT40 Upper, + * HT40 lower from chanspecs + */ + ht40_flag = band_chan_arr[index].flags & + IEEE80211_CHAN_NO_HT40; + if (CHSPEC_SB_UPPER(chanspec)) { + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + band_chan_arr[index].flags &= + ~IEEE80211_CHAN_NO_HT40; + band_chan_arr[index].flags |= + IEEE80211_CHAN_NO_HT40PLUS; + } else { + /* It should be one of + * IEEE80211_CHAN_NO_HT40 or + * IEEE80211_CHAN_NO_HT40PLUS + */ + band_chan_arr[index].flags &= + ~IEEE80211_CHAN_NO_HT40; + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + band_chan_arr[index].flags |= + IEEE80211_CHAN_NO_HT40MINUS; + } + } else { + band_chan_arr[index].flags = + IEEE80211_CHAN_NO_HT40; + if (band == IEEE80211_BAND_2GHZ) + channel |= WL_CHANSPEC_BAND_2G; + else + channel |= WL_CHANSPEC_BAND_5G; + channel |= WL_CHANSPEC_BW_20; + err = brcmf_fil_bsscfg_int_get(ifp, + "per_chan_info", + &channel); + if (!err) { + if (channel & WL_CHAN_RADAR) + band_chan_arr[index].flags |= + (IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_NO_IBSS); + if (channel & WL_CHAN_PASSIVE) + band_chan_arr[index].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + } + } + if (!update) + (*n_cnt)++; + } + } + exit: + kfree(pbuf); + return err; + } + + + static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg) { struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); struct wiphy *wiphy; s32 phy_list; + u32 band_list[3]; + u32 nmode; + u32 bw_cap = 0; s8 phy; - s32 err = 0; + s32 err; + u32 nband; + s32 i; + struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; + s32 index;
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST, &phy_list, sizeof(phy_list)); if (err) { - brcmf_err("error (%d)\n", err); + brcmf_err("BRCMF_C_GET_PHYLIST error (%d)\n", err); return err; }
phy = ((char *)&phy_list)[0]; - brcmf_dbg(INFO, "%c phy\n", phy); - if (phy == 'n' || phy == 'a') { - wiphy = cfg_to_wiphy(cfg); - wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; + brcmf_dbg(INFO, "BRCMF_C_GET_PHYLIST reported: %c phy\n", phy); + + + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, + &band_list, sizeof(band_list)); + if (err) { + brcmf_err("BRCMF_C_GET_BANDLIST error (%d)\n", err); + return err; } + brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n", + band_list[0], band_list[1], band_list[2]); + + err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode); + if (err) { + brcmf_err("nmode error (%d)\n", err); + } else { + err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &bw_cap); + if (err) + brcmf_err("mimo_bw_cap error (%d)\n", err); + } + brcmf_dbg(INFO, "nmode=%d, mimo_bw_cap=%d\n", nmode, bw_cap); + + err = brcmf_construct_reginfo(cfg, bw_cap); + if (err) { + brcmf_err("brcmf_construct_reginfo failed (%d)\n", err); + return err; + } + + nband = band_list[0]; + memset(bands, 0, sizeof(bands)); + + for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) { + index = -1; + if ((band_list[i] == WLC_BAND_5G) && + (__wl_band_5ghz_a.n_channels > 0)) { + index = IEEE80211_BAND_5GHZ; + bands[index] = &__wl_band_5ghz_a; + if ((bw_cap == WLC_N_BW_40ALL) || + (bw_cap == WLC_N_BW_20IN2G_40IN5G)) + bands[index]->ht_cap.cap |= + IEEE80211_HT_CAP_SGI_40; + } else if ((band_list[i] == WLC_BAND_2G) && + (__wl_band_2ghz.n_channels > 0)) { + index = IEEE80211_BAND_2GHZ; + bands[index] = &__wl_band_2ghz; + if (bw_cap == WLC_N_BW_40ALL) + bands[index]->ht_cap.cap |= + IEEE80211_HT_CAP_SGI_40; + } + + if ((index >= 0) && nmode) { + bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; + bands[index]->ht_cap.ht_supported = true; + bands[index]->ht_cap.ampdu_factor = + IEEE80211_HT_MAX_AMPDU_64K; + bands[index]->ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_16; + /* An HT shall support all EQM rates for one spatial + * stream + */ + bands[index]->ht_cap.mcs.rx_mask[0] = 0xff; + } + } + + wiphy = cfg_to_wiphy(cfg); + wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ]; + wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ]; + wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
return err; }
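One detail worth calling out in brcmf_construct_reginfo() above: a 40 MHz chanspec reports the centre channel, so the loop derives the primary 20 MHz channel by stepping CH_10MHZ_APART up or down according to the sideband. A numeric sanity check; the value 2 for CH_10MHZ_APART is an assumption based on the 5 MHz channel-number spacing, not quoted from the header:

    #include <assert.h>

    #define CH_10MHZ_APART  2       /* assumed: channel numbers step in 5 MHz */

    int main(void)
    {
            int centre = 38;        /* 40 MHz pair built from channels 36 + 40 */

            /* upper sideband: the primary 20 MHz channel lies above the centre */
            assert(centre + CH_10MHZ_APART == 40);
            /* lower sideband: it lies below */
            assert(centre - CH_10MHZ_APART == 36);
            return 0;
    }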
+ static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg) { - return wl_update_wiphybands(cfg); + return brcmf_update_wiphybands(cfg); }
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) diff --combined drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index e2340b2,cd83786..b530139 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@@ -1,5 -1,6 +1,6 @@@ /* * Copyright (c) 2010 Broadcom Corporation + * Copyright (c) 2013 Hauke Mehrtens hauke@hauke-m.de * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@@ -34,6 -35,7 +35,7 @@@ #include "mac80211_if.h" #include "main.h" #include "debug.h" + #include "led.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ #define BRCMS_FLUSH_TIMEOUT 500 /* msec */ @@@ -274,130 -276,6 +276,131 @@@ static void brcms_set_basic_rate(struc } }
+/** + * This function frees the WL per-device resources. + * + * This function frees resources owned by the WL device pointed to + * by the wl parameter. + * + * precondition: can both be called locked and unlocked + * + */ +static void brcms_free(struct brcms_info *wl) +{ + struct brcms_timer *t, *next; + + /* free ucode data */ + if (wl->fw.fw_cnt) + brcms_ucode_data_free(&wl->ucode); + if (wl->irq) + free_irq(wl->irq, wl); + + /* kill dpc */ + tasklet_kill(&wl->tasklet); + + if (wl->pub) { + brcms_debugfs_detach(wl->pub); + brcms_c_module_unregister(wl->pub, "linux", wl); + } + + /* free common resources */ + if (wl->wlc) { + brcms_c_detach(wl->wlc); + wl->wlc = NULL; + wl->pub = NULL; + } + + /* virtual interface deletion is deferred so we cannot spinwait */ + + /* wait for all pending callbacks to complete */ + while (atomic_read(&wl->callbacks) > 0) + schedule(); + + /* free timers */ + for (t = wl->timers; t; t = next) { + next = t->next; +#ifdef DEBUG + kfree(t->name); +#endif + kfree(t); + } +} + +/* +* called from both kernel as from this kernel module (error flow on attach) +* precondition: perimeter lock is not acquired. +*/ +static void brcms_remove(struct bcma_device *pdev) +{ + struct ieee80211_hw *hw = bcma_get_drvdata(pdev); + struct brcms_info *wl = hw->priv; + + if (wl->wlc) { ++ brcms_led_unregister(wl); + wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); + wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); + ieee80211_unregister_hw(hw); + } + + brcms_free(wl); + + bcma_set_drvdata(pdev, NULL); + ieee80211_free_hw(hw); +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. + */ +static void brcms_release_fw(struct brcms_info *wl) +{ + int i; + for (i = 0; i < MAX_FW_IMAGES; i++) { + release_firmware(wl->fw.fw_bin[i]); + release_firmware(wl->fw.fw_hdr[i]); + } +} + +/* + * Precondition: Since this function is called in brcms_pci_probe() context, + * no locking is required. + */ +static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) +{ + int status; + struct device *device = &pdev->dev; + char fw_name[100]; + int i; + + memset(&wl->fw, 0, sizeof(struct brcms_firmware)); + for (i = 0; i < MAX_FW_IMAGES; i++) { + if (brcms_firmwares[i] == NULL) + break; + sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], + UCODE_LOADER_API_VER); + status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); + if (status) { + wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", + KBUILD_MODNAME, fw_name); + return status; + } + sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], + UCODE_LOADER_API_VER); + status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); + if (status) { + wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", + KBUILD_MODNAME, fw_name); + return status; + } + wl->fw.hdr_num_entries[i] = + wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); + } + wl->fw.fw_cnt = i; + status = brcms_ucode_data_init(wl, &wl->ucode); + brcms_release_fw(wl); + return status; +} + static void brcms_ops_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@@ -430,14 -308,6 +433,14 @@@ static int brcms_ops_start(struct ieee8 if (!blocked) wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+ if (!wl->ucode.bcm43xx_bomminor) { + err = brcms_request_fw(wl, wl->wlc->hw->d11core); + if (err) { + brcms_remove(wl->wlc->hw->d11core); + return -ENOENT; + } + } + spin_lock_bh(&wl->lock); /* avoid acknowledging frames before a non-monitor device is added */ wl->mute_tx = true; @@@ -487,18 -357,26 +490,26 @@@ brcms_ops_add_interface(struct ieee8021 { struct brcms_info *wl = hw->priv;
- /* Just STA for now */ - if (vif->type != NL80211_IFTYPE_STATION) { + /* Just STA, AP and ADHOC for now */ + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC) { brcms_err(wl->wlc->hw->d11core, - "%s: Attempt to add type %d, only STA for now\n", + "%s: Attempt to add type %d, only STA, AP and AdHoc for now\n", __func__, vif->type); return -EOPNOTSUPP; }
spin_lock_bh(&wl->lock); - memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr)); wl->mute_tx = false; brcms_c_mute(wl->wlc, false); + if (vif->type == NL80211_IFTYPE_STATION) + brcms_c_start_station(wl->wlc, vif->addr); + else if (vif->type == NL80211_IFTYPE_AP) + brcms_c_start_ap(wl->wlc, vif->addr, vif->bss_conf.bssid, + vif->bss_conf.ssid, vif->bss_conf.ssid_len); + else if (vif->type == NL80211_IFTYPE_ADHOC) + brcms_c_start_adhoc(wl->wlc, vif->addr); spin_unlock_bh(&wl->lock);
return 0; @@@ -650,14 -528,43 +661,43 @@@ brcms_ops_bss_info_changed(struct ieee8 brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET, info->bssid); spin_unlock_bh(&wl->lock); } - if (changed & BSS_CHANGED_BEACON) + if (changed & BSS_CHANGED_SSID) { + /* BSSID changed, for whatever reason (IBSS and managed mode) */ + spin_lock_bh(&wl->lock); + brcms_c_set_ssid(wl->wlc, info->ssid, info->ssid_len); + spin_unlock_bh(&wl->lock); + } + if (changed & BSS_CHANGED_BEACON) { /* Beacon data changed, retrieve new beacon (beaconing modes) */ - brcms_err(core, "%s: beacon changed\n", __func__); + struct sk_buff *beacon; + u16 tim_offset = 0; + + spin_lock_bh(&wl->lock); + beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL); + brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset, + info->dtim_period); + spin_unlock_bh(&wl->lock); + } + + if (changed & BSS_CHANGED_AP_PROBE_RESP) { + struct sk_buff *probe_resp; + + spin_lock_bh(&wl->lock); + probe_resp = ieee80211_proberesp_get(hw, vif); + brcms_c_set_new_probe_resp(wl->wlc, probe_resp); + spin_unlock_bh(&wl->lock); + }
if (changed & BSS_CHANGED_BEACON_ENABLED) { /* Beaconing should be enabled/disabled (beaconing modes) */ brcms_err(core, "%s: Beacon enabled: %s\n", __func__, info->enable_beacon ? "true" : "false"); + if (info->enable_beacon && + hw->wiphy->flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) { + brcms_c_enable_probe_resp(wl->wlc, true); + } else { + brcms_c_enable_probe_resp(wl->wlc, false); + } }
if (changed & BSS_CHANGED_CQM) { @@@ -855,7 -762,7 +895,7 @@@ static bool brcms_tx_flush_completed(st return result; }
- static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) + static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop) { struct brcms_info *wl = hw->priv; int ret; @@@ -870,6 -777,28 +910,28 @@@ "ret=%d\n", jiffies_to_msecs(ret)); }
+ static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) + { + struct brcms_info *wl = hw->priv; + u64 tsf; + + spin_lock_bh(&wl->lock); + tsf = brcms_c_tsf_get(wl->wlc); + spin_unlock_bh(&wl->lock); + + return tsf; + } + + static void brcms_ops_set_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u64 tsf) + { + struct brcms_info *wl = hw->priv; + + spin_lock_bh(&wl->lock); + brcms_c_tsf_set(wl->wlc, tsf); + spin_unlock_bh(&wl->lock); + } + static const struct ieee80211_ops brcms_ops = { .tx = brcms_ops_tx, .start = brcms_ops_start, @@@ -886,6 -815,8 +948,8 @@@ .ampdu_action = brcms_ops_ampdu_action, .rfkill_poll = brcms_ops_rfkill_poll, .flush = brcms_ops_flush, + .get_tsf = brcms_ops_get_tsf, + .set_tsf = brcms_ops_set_tsf, };
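The new get_tsf/set_tsf callbacks expose the 64-bit TSF counter that mac80211 needs to keep IBSS timing in sync, which lines up with the AdHoc interface support added earlier in this file; like the other ops, both take wl->lock around brcms_c_tsf_get()/brcms_c_tsf_set().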
void brcms_dpc(unsigned long data) @@@ -925,6 -856,129 +989,6 @@@ wake_up(&wl->tx_flush_wq); }
-/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) -{ - int status; - struct device *device = &pdev->dev; - char fw_name[100]; - int i; - - memset(&wl->fw, 0, sizeof(struct brcms_firmware)); - for (i = 0; i < MAX_FW_IMAGES; i++) { - if (brcms_firmwares[i] == NULL) - break; - sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], - UCODE_LOADER_API_VER); - status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); - if (status) { - wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", - KBUILD_MODNAME, fw_name); - return status; - } - sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], - UCODE_LOADER_API_VER); - status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); - if (status) { - wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", - KBUILD_MODNAME, fw_name); - return status; - } - wl->fw.hdr_num_entries[i] = - wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); - } - wl->fw.fw_cnt = i; - return brcms_ucode_data_init(wl, &wl->ucode); -} - -/* - * Precondition: Since this function is called in brcms_pci_probe() context, - * no locking is required. - */ -static void brcms_release_fw(struct brcms_info *wl) -{ - int i; - for (i = 0; i < MAX_FW_IMAGES; i++) { - release_firmware(wl->fw.fw_bin[i]); - release_firmware(wl->fw.fw_hdr[i]); - } -} - -/** - * This function frees the WL per-device resources. - * - * This function frees resources owned by the WL device pointed to - * by the wl parameter. - * - * precondition: can both be called locked and unlocked - * - */ -static void brcms_free(struct brcms_info *wl) -{ - struct brcms_timer *t, *next; - - /* free ucode data */ - if (wl->fw.fw_cnt) - brcms_ucode_data_free(&wl->ucode); - if (wl->irq) - free_irq(wl->irq, wl); - - /* kill dpc */ - tasklet_kill(&wl->tasklet); - - if (wl->pub) { - brcms_debugfs_detach(wl->pub); - brcms_c_module_unregister(wl->pub, "linux", wl); - } - - /* free common resources */ - if (wl->wlc) { - brcms_c_detach(wl->wlc); - wl->wlc = NULL; - wl->pub = NULL; - } - - /* virtual interface deletion is deferred so we cannot spinwait */ - - /* wait for all pending callbacks to complete */ - while (atomic_read(&wl->callbacks) > 0) - schedule(); - - /* free timers */ - for (t = wl->timers; t; t = next) { - next = t->next; -#ifdef DEBUG - kfree(t->name); -#endif - kfree(t); - } -} - -/* -* called from both kernel as from this kernel module (error flow on attach) -* precondition: perimeter lock is not acquired. -*/ -static void brcms_remove(struct bcma_device *pdev) -{ - struct ieee80211_hw *hw = bcma_get_drvdata(pdev); - struct brcms_info *wl = hw->priv; - - if (wl->wlc) { - brcms_led_unregister(wl); - wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); - wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); - ieee80211_unregister_hw(hw); - } - - brcms_free(wl); - - bcma_set_drvdata(pdev, NULL); - ieee80211_free_hw(hw); -} - static irqreturn_t brcms_isr(int irq, void *dev_id) { struct brcms_info *wl; @@@ -1004,7 -1058,16 +1068,16 @@@ static int ieee_hw_init(struct ieee8021
/* channel change time is dependent on chip and band */ hw->channel_change_time = 7 * 1000; - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_ADHOC); + + /* + * deactivate sending probe responses by ucode, because this will + * cause problems when WPS is used. + * + * hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + */
hw->rate_control_algorithm = "minstrel_ht";
@@@ -1057,8 -1120,18 +1130,8 @@@ static struct brcms_info *brcms_attach( spin_lock_init(&wl->lock); spin_lock_init(&wl->isr_lock);
- /* prepare ucode */ - if (brcms_request_fw(wl, pdev) < 0) { - wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " - "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); - brcms_release_fw(wl); - brcms_remove(pdev); - return NULL; - } - /* common load-time initialization */ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); - brcms_release_fw(wl); if (!wl->wlc) { wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", KBUILD_MODNAME, err); @@@ -1151,6 -1224,8 +1224,8 @@@ static int brcms_bcma_probe(struct bcma pr_err("%s: brcms_attach failed!\n", __func__); return -ENODEV; } + brcms_led_register(wl); + return 0; }
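The removed brcms_request_fw()/brcms_release_fw() pair followed a load-all-or-release-all shape: request each firmware image in turn, bail out on the first failure, and let a single release helper clean up whatever did load. A minimal userspace sketch of that shape, with plain stdio standing in for request_firmware() and hypothetical blob names:

/* Load-all-or-release-all resource pattern, as in the removed
 * brcms_request_fw()/brcms_release_fw() pair. File names are made up. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_IMAGES 2

static FILE *images[MAX_IMAGES];

static void release_images(void)
{
        for (int i = 0; i < MAX_IMAGES; i++)
                if (images[i]) {
                        fclose(images[i]);
                        images[i] = NULL;
                }
}

static int request_images(const char *const names[])
{
        for (int i = 0; i < MAX_IMAGES && names[i]; i++) {
                images[i] = fopen(names[i], "rb");
                if (!images[i]) {
                        fprintf(stderr, "failed to load %s\n", names[i]);
                        return -1;      /* caller releases what did load */
                }
        }
        return 0;
}

int main(void)
{
        const char *names[MAX_IMAGES] = { "fw-a.bin", "fw-b.bin" };

        if (request_images(names)) {
                release_images();       /* mirror brcms: release on error */
                return 1;
        }
        /* ... consume the firmware here ... */
        release_images();
        return 0;
}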
diff --combined drivers/net/wireless/ray_cs.c index a6f660c,ebada81..9b557a1 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@@ -144,7 -144,7 +144,7 @@@ static int psm static char *essid;
/* Default to encapsulation unless translation requested */ - static int translate = 1; + static bool translate = 1;
static int country = USA;
@@@ -178,7 -178,7 +178,7 @@@ module_param(hop_dwell, int, 0) module_param(beacon_period, int, 0); module_param(psm, int, 0); module_param(essid, charp, 0); - module_param(translate, int, 0); + module_param(translate, bool, 0); module_param(country, int, 0); module_param(sniffer, int, 0); module_param(bc, int, 0); @@@ -953,7 -953,7 +953,7 @@@ static int translate_frame(ray_dev_t *l unsigned char *data, int len) { __be16 proto = ((struct ethhdr *)data)->h_proto; - if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ + if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */ pr_debug("ray_cs translate_frame DIX II\n"); /* Copy LLC header to card buffer */ memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); @@@ -1353,7 -1353,7 +1353,7 @@@ static int ray_get_range(struct net_dev static int ray_set_framing(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - translate = *(extra); /* Set framing mode */ + translate = !!*(extra); /* Set framing mode */
return 0; } @@@ -2778,7 -2778,7 +2778,7 @@@ static ssize_t int_proc_write(struct fi nr = nr * 10 + c; p++; } while (--len); - *(int *)PDE(file_inode(file))->data = nr; + *(int *)PDE_DATA(file_inode(file)) = nr; return count; }
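The ray_cs changes above convert translate into a bool module parameter and normalize the userspace-supplied char with !!, which collapses any non-zero value to exactly 1 before it is stored. A tiny standalone illustration:

/* Truth-value normalization as used in ray_set_framing(): a byte from
 * userspace may hold any value, and !! folds it to 0 or 1. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        char extra = 7;                 /* arbitrary non-zero input */
        bool translate = !!extra;       /* normalized to exactly 1 */

        printf("translate = %d\n", translate);
        return 0;
}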
diff --combined drivers/net/wireless/rt2x00/rt2x00mmio.c index d84a680,d84a680..9fe9a36 --- a/drivers/net/wireless/rt2x00/rt2x00mmio.c +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c @@@ -123,12 -123,12 +123,10 @@@ static int rt2x00pci_alloc_queue_dma(st */ addr = dma_alloc_coherent(rt2x00dev->dev, queue->limit * queue->desc_size, -- &dma, GFP_KERNEL); ++ &dma, GFP_KERNEL | __GFP_ZERO); if (!addr) return -ENOMEM;
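The rt2x00 hunk folds zeroing into the allocation by passing __GFP_ZERO, which makes the memset() removed just below redundant. The closest userspace analogue is preferring calloc() over malloc() followed by memset():

/* Userspace analogue of the __GFP_ZERO change: ask the allocator for
 * zeroed memory instead of zeroing it by hand afterwards. */
#include <stdlib.h>
#include <string.h>

struct desc { int addr; int len; };

int main(void)
{
        size_t n = 64;

        /* old shape: allocate, then zero by hand */
        struct desc *a = malloc(n * sizeof(*a));
        if (!a)
                return 1;
        memset(a, 0, n * sizeof(*a));

        /* new shape: one call that returns zeroed memory */
        struct desc *b = calloc(n, sizeof(*b));
        if (!b) {
                free(a);
                return 1;
        }

        free(a);
        free(b);
        return 0;
}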
-- memset(addr, 0, queue->limit * queue->desc_size); -- /* * Initialize all queue entries to contain valid addresses. */ diff --combined drivers/scsi/csiostor/csio_hw.c index 7dbaf58,a0b4c89..1936055 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@@ -61,7 -61,7 +61,7 @@@ int csio_msi = 2 static int dev_num;
/* FCoE Adapter types & its description */ - static const struct csio_adap_desc csio_fcoe_adapters[] = { + static const struct csio_adap_desc csio_t4_fcoe_adapters[] = { {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, @@@ -77,7 -77,38 +77,38 @@@ {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, - {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"} + {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, + {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"}, + {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"}, + {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"}, + {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"}, + {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"}, + {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"}, + {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"}, + {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"} + }; + + static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { + {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, + {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, + {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"}, + {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, + {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, + {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, + {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, + {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, + {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, + {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, + {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, + {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, + {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, + {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, + {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, + {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, + {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, + {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, + {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, + {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"} };
static void csio_mgmtm_cleanup(struct csio_mgmtm *); @@@ -124,7 -155,7 +155,7 @@@ int csio_is_hw_removing(struct csio_hw * at the time it indicated completion is stored there. Returns 0 if the * operation completes and -EAGAIN otherwise. */ - static int + int csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, int polarity, int attempts, int delay, uint32_t *valp) { @@@ -145,6 -176,24 +176,24 @@@ } }
+ /* + * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @hw: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ + void + csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, + unsigned int mask, unsigned int val) + { + csio_wr_reg32(hw, addr, TP_PIO_ADDR); + val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask; + csio_wr_reg32(hw, val, TP_PIO_DATA); + } + void csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, uint32_t value) @@@ -157,242 -206,22 +206,22 @@@
}
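csio_hw_tp_wr_bits_indirect(), added above, is a read-modify-write through an indirect address/data register pair: write the target address into one window register, merge the new field into the old value, and write the result back through the data register. A self-contained sketch with a plain array standing in for the hardware register file:

/* Read-modify-write through an address/data register window. */
#include <stdio.h>

static unsigned int tp_regs[16];        /* pretend indirect register file */
static unsigned int pio_addr;           /* pretend TP_PIO_ADDR window */

static void wr_addr(unsigned int addr)  { pio_addr = addr; }
static unsigned int rd_data(void)       { return tp_regs[pio_addr]; }
static void wr_data(unsigned int v)     { tp_regs[pio_addr] = v; }

static void tp_wr_bits_indirect(unsigned int addr, unsigned int mask,
                                unsigned int val)
{
        wr_addr(addr);
        val |= rd_data() & ~mask;       /* keep bits outside the field */
        wr_data(val);
}

int main(void)
{
        tp_regs[3] = 0xffff0000;
        tp_wr_bits_indirect(3, 0xff, 0x42);     /* touch only the low byte */
        printf("reg3 = 0x%08x\n", tp_regs[3]);  /* prints 0xffff0042 */
        return 0;
}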
static int csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) { - return csio_memory_rw(hw, mtype, addr, len, buf, 0); + return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, + addr, len, buf, 0); }
/* * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */ - #define EEPROM_MAX_RD_POLL 40 - #define EEPROM_MAX_WR_POLL 6 - #define EEPROM_STAT_ADDR 0x7bfc - #define VPD_BASE 0x400 - #define VPD_BASE_OLD 0 - #define VPD_LEN 512 + #define EEPROM_MAX_RD_POLL 40 + #define EEPROM_MAX_WR_POLL 6 + #define EEPROM_STAT_ADDR 0x7bfc + #define VPD_BASE 0x400 + #define VPD_BASE_OLD 0 + #define VPD_LEN 1024 #define VPD_INFO_FLD_HDR_SIZE 3
/* @@@ -817,23 -646,6 +646,6 @@@ out return 0; }
- /* - * csio_hw_flash_cfg_addr - return the address of the flash - * configuration file - * @hw: the HW module - * - * Return the address within the flash where the Firmware Configuration - * File is stored. - */ - static unsigned int - csio_hw_flash_cfg_addr(struct csio_hw *hw) - { - if (hw->params.sf_size == 0x100000) - return FPGA_FLASH_CFG_OFFSET; - else - return FLASH_CFG_OFFSET; - } - static void csio_hw_print_fw_version(struct csio_hw *hw, char *str) { @@@ -898,13 -710,13 +710,13 @@@ csio_hw_check_fw_version(struct csio_h minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ + if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */ csio_err(hw, "card FW has major version %u, driver wants %u\n", - major, FW_VERSION_MAJOR); + major, FW_VERSION_MAJOR(hw)); return -EINVAL; }
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) + if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw)) return 0; /* perfect match */
/* Minor/micro version mismatch */ @@@ -1044,7 -856,7 +856,7 @@@ static voi csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) { uint16_t val; - uint32_t pcie_cap; + int pcie_cap;
if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { pci_read_config_word(hw->pdev, @@@ -1056,84 -868,6 +868,6 @@@ } }
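One plausible motivation for changing pcie_cap from uint32_t to int above is that capability lookups conventionally report absence with a negative value, and a negative sentinel only survives comparison in a signed type. A sketch with a hypothetical find_cap() makes the point:

/* Why a signed type for a "found or not" value: with an unsigned type
 * the -1 sentinel becomes a huge positive number and the test below
 * would never fire. find_cap() is hypothetical. */
#include <stdio.h>

static int find_cap(int id)
{
        return (id == 0x10) ? 0x70 : -1;        /* offset, or -1 if absent */
}

int main(void)
{
        int cap = find_cap(0x99);

        if (cap < 0)
                printf("capability not present\n");
        return 0;
}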
- - /* - * Return the specified PCI-E Configuration Space register from our Physical - * Function. We try first via a Firmware LDST Command since we prefer to let - * the firmware own all of these registers, but if that fails we go for it - * directly ourselves. - */ - static uint32_t - csio_read_pcie_cfg4(struct csio_hw *hw, int reg) - { - u32 val = 0; - struct csio_mb *mbp; - int rv; - struct fw_ldst_cmd *ldst_cmd; - - mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); - if (!mbp) { - CSIO_INC_STATS(hw, n_err_nomem); - pci_read_config_dword(hw->pdev, reg, &val); - return val; - } - - csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg); - - rv = csio_mb_issue(hw, mbp); - - /* - * If the LDST Command suucceeded, exctract the returned register - * value. Otherwise read it directly ourself. - */ - if (rv == 0) { - ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); - val = ntohl(ldst_cmd->u.pcie.data[0]); - } else - pci_read_config_dword(hw->pdev, reg, &val); - - mempool_free(mbp, hw->mb_mempool); - - return val; - } /* csio_read_pcie_cfg4 */ - - static int - csio_hw_set_mem_win(struct csio_hw *hw) - { - u32 bar0; - - /* - * Truncation intentional: we only read the bottom 32-bits of the - * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to - * read BAR0 instead of using pci_resource_start() because we could be - * operating from within a Virtual Machine which is trapping our - * accesses to our Configuration Space and we need to set up the PCI-E - * Memory Window decoders with the actual addresses which will be - * coming across the PCI-E link. - */ - bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); - bar0 &= PCI_BASE_ADDRESS_MEM_MASK; - - /* - * Set up memory window for accessing adapter memory ranges. (Read - * back MA register to ensure that changes propagate before we attempt - * to use the new values.) - */ - csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN0_APERTURE) - 10), - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0)); - csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN1_APERTURE) - 10), - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1)); - csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN2_APERTURE) - 10), - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); - csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); - return 0; - } /* csio_hw_set_mem_win */ - - - /*****************************************************************************/ /* HW State machine assists */ /*****************************************************************************/ @@@ -1234,7 -968,9 +968,9 @@@ retry for (;;) { uint32_t pcie_fw;
+ spin_unlock_irq(&hw->lock); msleep(50); + spin_lock_irq(&hw->lock); waiting -= 50;
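The hunk above drops hw->lock around msleep(): sleeping while holding a lock that the woken path may need is a classic deadlock or latency bug. A userspace sketch of the same discipline, with a pthread mutex standing in for the spinlock and the names chosen for illustration only:

/* Drop the lock across the sleep inside a polling loop. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int done;                        /* set by another path */

static int wait_for_done(int budget_ms)
{
        pthread_mutex_lock(&lock);
        while (!done && budget_ms > 0) {
                pthread_mutex_unlock(&lock);    /* never sleep holding it */
                usleep(50 * 1000);
                pthread_mutex_lock(&lock);
                budget_ms -= 50;
        }
        pthread_mutex_unlock(&lock);
        return done ? 0 : -1;
}

int main(void)
{
        done = 1;                       /* pretend the event already fired */
        printf("wait: %d\n", wait_for_done(200));
        return 0;
}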
/* @@@ -2121,9 -1857,9 +1857,9 @@@ csio_hw_flash_config(struct csio_hw *hw uint32_t *cfg_data; int value_to_add = 0;
- if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) { - csio_err(hw, "could not find config file " CSIO_CF_FNAME - ",err: %d\n", ret); + if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) { + csio_err(hw, "could not find config file %s, err: %d\n", + CSIO_CF_FNAME(hw), ret); return -ENOENT; }
@@@ -2147,9 -1883,24 +1883,24 @@@
ret = csio_memory_write(hw, mtype, maddr, cf->size + value_to_add, cfg_data); + + if ((ret == 0) && (value_to_add != 0)) { + union { + u32 word; + char buf[4]; + } last; + size_t size = cf->size & ~0x3; + int i; + + last.word = cfg_data[size >> 2]; + for (i = value_to_add; i < 4; i++) + last.buf[i] = 0; + ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); + } if (ret == 0) { - csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n"); - strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64); + csio_info(hw, "config file upgraded to %s\n", + CSIO_CF_FNAME(hw)); + snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw)); }
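The new block above handles a config file whose size is not a multiple of four: the last partial word is staged in a union so the trailing pad bytes go out as zeros. A standalone sketch of the idea, where write_word() is a hypothetical stand-in for the device write:

/* Pad the tail of a byte buffer so it can be written in whole words. */
#include <stdio.h>
#include <string.h>

static void write_word(size_t off, unsigned int w)
{
        printf("word @%zu = 0x%08x\n", off, w);
}

static void write_padded(const unsigned char *buf, size_t size)
{
        size_t whole = size & ~(size_t)3;       /* bytes in full words */
        size_t i;
        unsigned int w;

        for (i = 0; i < whole; i += 4) {
                memcpy(&w, buf + i, 4);
                write_word(i, w);
        }

        if (size & 3) {                         /* partial tail word */
                union { unsigned int word; unsigned char b[4]; } last = { 0 };

                memcpy(last.b, buf + whole, size & 3);
                write_word(whole, last.word);   /* pad bytes are zero */
        }
}

int main(void)
{
        unsigned char cfg[6] = { 1, 2, 3, 4, 5, 6 };

        write_padded(cfg, sizeof(cfg));
        return 0;
}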
leave: @@@ -2179,7 -1930,7 +1930,7 @@@ csio_hw_use_fwconfig(struct csio_hw *hw { unsigned int mtype, maddr; int rv; - uint32_t finiver, finicsum, cfcsum; + uint32_t finiver = 0, finicsum = 0, cfcsum = 0; int using_flash; char path[64];
@@@ -2207,7 -1958,7 +1958,7 @@@ * config file from flash. */ mtype = FW_MEMTYPE_CF_FLASH; - maddr = csio_hw_flash_cfg_addr(hw); + maddr = hw->chip_ops->chip_flash_cfg_addr(hw); using_flash = 1; } else { /* @@@ -2346,30 -2097,32 +2097,32 @@@ csio_hw_flash_fw(struct csio_hw *hw struct pci_dev *pci_dev = hw->pdev; struct device *dev = &pci_dev->dev ;
- if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) { - csio_err(hw, "could not find firmware image " CSIO_FW_FNAME - ",err: %d\n", ret); + if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) { + csio_err(hw, "could not find firmware image %s, err: %d\n", + CSIO_FW_FNAME(hw), ret); return -EINVAL; }
hdr = (const struct fw_hdr *)fw->data; fw_ver = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) + if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) return -EINVAL; /* wrong major version, won't do */
/* * If the flash FW is unusable or we found something newer, load it. */ - if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR || + if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) || fw_ver > hw->fwrev) { ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, /*force=*/false); if (!ret) - csio_info(hw, "firmware upgraded to version %pI4 from " - CSIO_FW_FNAME "\n", &hdr->fw_ver); + csio_info(hw, + "firmware upgraded to version %pI4 from %s\n", + &hdr->fw_ver, CSIO_FW_FNAME(hw)); else csio_err(hw, "firmware upgrade failed! err=%d\n", ret); - } + } else + ret = -EINVAL;
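The version gate above extracts major/minor/micro from one packed 32-bit word, refuses a major mismatch outright, and flashes only when the file is newer. A sketch of the arithmetic, assuming 8-bit fields purely for illustration:

/* Field-by-field comparison of packed firmware versions. */
#include <stdio.h>

#define VER_MAJOR(v) (((v) >> 24) & 0xff)
#define VER_MINOR(v) (((v) >> 16) & 0xff)
#define VER_MICRO(v) (((v) >>  8) & 0xff)

int main(void)
{
        unsigned int flash = 0x01040200;        /* 1.4.2 on the adapter */
        unsigned int file  = 0x01050000;        /* 1.5.0 in /lib/firmware */

        if (VER_MAJOR(file) != VER_MAJOR(flash))
                printf("major mismatch: refusing to flash\n");
        else if (file > flash)
                printf("upgrade %u.%u.%u -> %u.%u.%u\n",
                       VER_MAJOR(flash), VER_MINOR(flash), VER_MICRO(flash),
                       VER_MAJOR(file), VER_MINOR(file), VER_MICRO(file));
        else
                printf("flash firmware is current\n");
        return 0;
}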
release_firmware(fw);
@@@ -2410,7 -2163,7 +2163,7 @@@ csio_hw_configure(struct csio_hw *hw /* Set pci completion timeout value to 4 seconds. */ csio_set_pcie_completion_timeout(hw, 0xd);
- csio_hw_set_mem_win(hw); + hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
rv = csio_hw_get_fw_version(hw, &hw->fwrev); if (rv != 0) @@@ -2478,6 -2231,8 +2231,8 @@@ } else { if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; + /* device parameters */ rv = csio_get_device_params(hw); if (rv != 0) @@@ -2651,7 -2406,7 +2406,7 @@@ csio_hw_intr_disable(struct csio_hw *hw
}
- static void + void csio_hw_fatal_err(struct csio_hw *hw) { csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); @@@ -2990,14 -2745,6 +2745,6 @@@ csio_hws_pcierr(struct csio_hw *hw, enu /* END: HW SM */ /*****************************************************************************/
- /* Slow path handlers */ - struct intr_info { - unsigned int mask; /* bits to check in interrupt status */ - const char *msg; /* message to print or NULL */ - short stat_idx; /* stat counter to increment or -1 */ - unsigned short fatal; /* whether the condition reported is fatal */ - }; - /* * csio_handle_intr_status - table driven interrupt handler * @hw: HW instance @@@ -3011,7 -2758,7 +2758,7 @@@ * by an entry specifying mask 0. Returns the number of fatal interrupt * conditions. */ - static int + int csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, const struct intr_info *acts) { @@@ -3038,80 -2785,6 +2785,6 @@@ }
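csio_handle_intr_status(), made non-static above, is a table-driven handler: a status word is matched against { mask, message, fatal } entries terminated by a zero mask, and the number of fatal conditions is returned. A compact standalone version of the pattern (the stat-counter field is omitted here):

/* Table-driven interrupt-status decoding with a zero-mask terminator. */
#include <stdio.h>

struct intr_info {
        unsigned int mask;              /* bits to check in status */
        const char *msg;                /* message to print or NULL */
        unsigned short fatal;           /* whether the condition is fatal */
};

static int handle_intr_status(unsigned int status,
                              const struct intr_info *acts)
{
        int fatal = 0;

        for (; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        if (acts->msg)
                                printf("fatal: %s (0x%x)\n",
                                       acts->msg, status & acts->mask);
                } else if (acts->msg) {
                        printf("info: %s\n", acts->msg);
                }
        }
        return fatal;
}

int main(void)
{
        static const struct intr_info tbl[] = {
                { 0x1, "RX parity error", 1 },
                { 0x2, "TX underrun",     0 },
                { 0, NULL, 0 }          /* terminator */
        };

        return handle_intr_status(0x3, tbl) ? 1 : 0;
}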
/* - * Interrupt handler for the PCIE module. - */ - static void - csio_pcie_intr_handler(struct csio_hw *hw) - { - static struct intr_info sysbus_intr_info[] = { - { RNPP, "RXNP array parity error", -1, 1 }, - { RPCP, "RXPC array parity error", -1, 1 }, - { RCIP, "RXCIF array parity error", -1, 1 }, - { RCCP, "Rx completions control array parity error", -1, 1 }, - { RFTP, "RXFT array parity error", -1, 1 }, - { 0, NULL, 0, 0 } - }; - static struct intr_info pcie_port_intr_info[] = { - { TPCP, "TXPC array parity error", -1, 1 }, - { TNPP, "TXNP array parity error", -1, 1 }, - { TFTP, "TXFT array parity error", -1, 1 }, - { TCAP, "TXCA array parity error", -1, 1 }, - { TCIP, "TXCIF array parity error", -1, 1 }, - { RCAP, "RXCA array parity error", -1, 1 }, - { OTDD, "outbound request TLP discarded", -1, 1 }, - { RDPE, "Rx data parity error", -1, 1 }, - { TDUE, "Tx uncorrectable data error", -1, 1 }, - { 0, NULL, 0, 0 } - }; - static struct intr_info pcie_intr_info[] = { - { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, - { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, - { MSIDATAPERR, "MSI data parity error", -1, 1 }, - { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, - { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, - { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, - { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, - { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, - { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, - { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, - { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, - { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, - { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, - { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, - { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, - { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, - { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, - { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, - { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, - { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, - { FIDPERR, "PCI FID parity error", -1, 1 }, - { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, - { MATAGPERR, "PCI MA tag parity error", -1, 1 }, - { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, - { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, - { RXWRPERR, "PCI Rx write parity error", -1, 1 }, - { RPLPERR, "PCI replay buffer parity error", -1, 1 }, - { PCIESINT, "PCI core secondary fault", -1, 1 }, - { PCIEPINT, "PCI core primary fault", -1, 1 }, - { UNXSPLCPLERR, "PCI unexpected split completion error", -1, - 0 }, - { 0, NULL, 0, 0 } - }; - - int fat; - - fat = csio_handle_intr_status(hw, - PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, - sysbus_intr_info) + - csio_handle_intr_status(hw, - PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, - pcie_port_intr_info) + - csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); - if (fat) - csio_hw_fatal_err(hw); - } - - /* * TP interrupt handler. */ static void csio_tp_intr_handler(struct csio_hw *hw) @@@ -3517,7 -3190,7 +3190,7 @@@ static void csio_ncsi_intr_handler(stru */ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) { - uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; if (!v) @@@ -3527,7 -3200,7 +3200,7 @@@ csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); if (v & RXFIFO_PRTY_ERR) csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); - csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port)); csio_hw_fatal_err(hw); }
@@@ -3596,7 -3269,7 +3269,7 @@@ csio_hw_slow_intr_handler(struct csio_h csio_xgmac_intr_handler(hw, 3);
if (cause & PCIE) - csio_pcie_intr_handler(hw); + hw->chip_ops->chip_pcie_intr_handler(hw);
if (cause & MC) csio_mem_intr_handler(hw, MEM_MC); @@@ -3892,6 -3565,7 +3565,6 @@@ csio_process_fwevtq_entry(struct csio_h struct csio_fl_dma_buf *flb, void *priv) { __u8 op; - __be64 *data; void *msg = NULL; uint32_t msg_len = 0; bool msg_sg = 0; @@@ -3907,6 -3581,8 +3580,6 @@@ msg = (void *) flb; msg_len = flb->totlen; msg_sg = 1; - - data = (__be64 *) msg; } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
CSIO_INC_STATS(hw, n_cpl_fw6_msg); @@@ -3914,6 -3590,8 +3587,6 @@@ msg = (void *)((uintptr_t)wr + sizeof(__be64)); msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : sizeof(struct cpl_fw4_msg); - - data = (__be64 *) msg; } else { csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); CSIO_INC_STATS(hw, n_cpl_unexp); @@@ -4257,6 -3935,7 +3930,7 @@@ csio_hw_get_device_id(struct csio_hw *h &hw->params.pci.device_id);
csio_dev_id_cached(hw); + hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
} /* csio_hw_get_device_id */
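The chip_id captured above feeds the chip_ops selection made later in csio_hw_init(): one per-chip operations struct is picked once, and call sites such as chip_flash_cfg_addr and chip_pcie_intr_handler dispatch through its pointers instead of branching on the chip everywhere. A minimal sketch of that dispatch style; the names and addresses below are local stand-ins for t4_ops/t5_ops, not the driver's actual values:

/* Per-chip ops struct chosen once from the device id. */
#include <stdio.h>

struct chip_ops {
        unsigned int (*flash_cfg_addr)(void);
        void (*pcie_intr_handler)(void);
};

static unsigned int t4_cfg_addr(void) { return 0x1f0000; }
static void t4_pcie_intr(void) { printf("T4 PCIe handler\n"); }

static unsigned int t5_cfg_addr(void) { return 0x2f0000; }
static void t5_pcie_intr(void) { printf("T5 PCIe handler\n"); }

static const struct chip_ops t4_ops = { t4_cfg_addr, t4_pcie_intr };
static const struct chip_ops t5_ops = { t5_cfg_addr, t5_pcie_intr };

int main(void)
{
        unsigned int chip_id = 5;       /* pretend masked device id */
        const struct chip_ops *ops = (chip_id == 4) ? &t4_ops : &t5_ops;

        printf("cfg at 0x%x\n", ops->flash_cfg_addr());
        ops->pcie_intr_handler();
        return 0;
}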
@@@ -4275,19 -3954,21 +3949,21 @@@ csio_hw_set_description(struct csio_hw prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
- if (prot_type == CSIO_FPGA) { + if (prot_type == CSIO_T4_FCOE_ASIC) { + memcpy(hw->hw_ver, + csio_t4_fcoe_adapters[adap_type].model_no, 16); memcpy(hw->model_desc, - csio_fcoe_adapters[13].description, 32); - } else if (prot_type == CSIO_T4_FCOE_ASIC) { + csio_t4_fcoe_adapters[adap_type].description, + 32); + } else if (prot_type == CSIO_T5_FCOE_ASIC) { memcpy(hw->hw_ver, - csio_fcoe_adapters[adap_type].model_no, 16); + csio_t5_fcoe_adapters[adap_type].model_no, 16); memcpy(hw->model_desc, - csio_fcoe_adapters[adap_type].description, 32); + csio_t5_fcoe_adapters[adap_type].description, + 32); } else { char tempName[32] = "Chelsio FCoE Controller"; memcpy(hw->model_desc, tempName, 32); - - CSIO_DB_ASSERT(0); } } } /* csio_hw_set_description */ @@@ -4316,6 -3997,9 +3992,9 @@@ csio_hw_init(struct csio_hw *hw
strcpy(hw->name, CSIO_HW_NAME);
+ /* Initialize the HW chip ops with T4/T5 specific ops */ + hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops; + /* Set the model & its description */
ven_id = hw->params.pci.vendor_id; diff --combined drivers/scsi/scsi_transport_iscsi.c index ce06e87,2e38165..47799a3 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@@ -25,7 -25,6 +25,7 @@@ #include <linux/slab.h> #include <linux/bsg-lib.h> #include <linux/idr.h> +#include <linux/list.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@@ -461,689 -460,6 +461,689 @@@ void iscsi_destroy_iface(struct iscsi_i EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
/* + * Interface to display flash node params to sysfs + */ + +#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +/* flash node session attrs show */ +#define iscsi_flashnode_sess_attr_show(type, name, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_bus_flash_session *fnode_sess = \ + iscsi_dev_to_flash_session(dev);\ + struct iscsi_transport *t = fnode_sess->transport; \ + return t->get_flashnode_param(fnode_sess, param, buf); \ +} \ + + +#define iscsi_flashnode_sess_attr(type, name, param) \ + iscsi_flashnode_sess_attr_show(type, name, param) \ +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ + show_##type##_##name, NULL); + +/* Flash node session attributes */ + +iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable, + ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE); +iscsi_flashnode_sess_attr(fnode, discovery_session, + ISCSI_FLASHNODE_DISCOVERY_SESS); +iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE); +iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN); +iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN); +iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN); +iscsi_flashnode_sess_attr(fnode, data_seq_in_order, + ISCSI_FLASHNODE_DATASEQ_INORDER); +iscsi_flashnode_sess_attr(fnode, data_pdu_in_order, + ISCSI_FLASHNODE_PDU_INORDER); +iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN); +iscsi_flashnode_sess_attr(fnode, discovery_logout, + ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN); +iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN); +iscsi_flashnode_sess_attr(fnode, discovery_auth_optional, + ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL); +iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL); +iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST); +iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT); +iscsi_flashnode_sess_attr(fnode, def_time2retain, + ISCSI_FLASHNODE_DEF_TIME2RETAIN); +iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T); +iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID); +iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID); +iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST); +iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo, + ISCSI_FLASHNODE_DEF_TASKMGMT_TMO); +iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS); +iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME); +iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT); +iscsi_flashnode_sess_attr(fnode, discovery_parent_idx, + ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX); +iscsi_flashnode_sess_attr(fnode, discovery_parent_type, + ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE); +iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX); +iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX); +iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME); +iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN); +iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD); +iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN); +iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT); + +static struct attribute 
*iscsi_flashnode_sess_attrs[] = { + &dev_attr_fnode_auto_snd_tgt_disable.attr, + &dev_attr_fnode_discovery_session.attr, + &dev_attr_fnode_portal_type.attr, + &dev_attr_fnode_entry_enable.attr, + &dev_attr_fnode_immediate_data.attr, + &dev_attr_fnode_initial_r2t.attr, + &dev_attr_fnode_data_seq_in_order.attr, + &dev_attr_fnode_data_pdu_in_order.attr, + &dev_attr_fnode_chap_auth.attr, + &dev_attr_fnode_discovery_logout.attr, + &dev_attr_fnode_bidi_chap.attr, + &dev_attr_fnode_discovery_auth_optional.attr, + &dev_attr_fnode_erl.attr, + &dev_attr_fnode_first_burst_len.attr, + &dev_attr_fnode_def_time2wait.attr, + &dev_attr_fnode_def_time2retain.attr, + &dev_attr_fnode_max_outstanding_r2t.attr, + &dev_attr_fnode_isid.attr, + &dev_attr_fnode_tsid.attr, + &dev_attr_fnode_max_burst_len.attr, + &dev_attr_fnode_def_taskmgmt_tmo.attr, + &dev_attr_fnode_targetalias.attr, + &dev_attr_fnode_targetname.attr, + &dev_attr_fnode_tpgt.attr, + &dev_attr_fnode_discovery_parent_idx.attr, + &dev_attr_fnode_discovery_parent_type.attr, + &dev_attr_fnode_chap_in_idx.attr, + &dev_attr_fnode_chap_out_idx.attr, + &dev_attr_fnode_username.attr, + &dev_attr_fnode_username_in.attr, + &dev_attr_fnode_password.attr, + &dev_attr_fnode_password_in.attr, + &dev_attr_fnode_is_boot_target.attr, + NULL, +}; + +static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_bus_flash_session *fnode_sess = + iscsi_dev_to_flash_session(dev); + struct iscsi_transport *t = fnode_sess->transport; + int param; + + if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) { + param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE; + } else if (attr == &dev_attr_fnode_discovery_session.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_SESS; + } else if (attr == &dev_attr_fnode_portal_type.attr) { + param = ISCSI_FLASHNODE_PORTAL_TYPE; + } else if (attr == &dev_attr_fnode_entry_enable.attr) { + param = ISCSI_FLASHNODE_ENTRY_EN; + } else if (attr == &dev_attr_fnode_immediate_data.attr) { + param = ISCSI_FLASHNODE_IMM_DATA_EN; + } else if (attr == &dev_attr_fnode_initial_r2t.attr) { + param = ISCSI_FLASHNODE_INITIAL_R2T_EN; + } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) { + param = ISCSI_FLASHNODE_DATASEQ_INORDER; + } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) { + param = ISCSI_FLASHNODE_PDU_INORDER; + } else if (attr == &dev_attr_fnode_chap_auth.attr) { + param = ISCSI_FLASHNODE_CHAP_AUTH_EN; + } else if (attr == &dev_attr_fnode_discovery_logout.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN; + } else if (attr == &dev_attr_fnode_bidi_chap.attr) { + param = ISCSI_FLASHNODE_BIDI_CHAP_EN; + } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL; + } else if (attr == &dev_attr_fnode_erl.attr) { + param = ISCSI_FLASHNODE_ERL; + } else if (attr == &dev_attr_fnode_first_burst_len.attr) { + param = ISCSI_FLASHNODE_FIRST_BURST; + } else if (attr == &dev_attr_fnode_def_time2wait.attr) { + param = ISCSI_FLASHNODE_DEF_TIME2WAIT; + } else if (attr == &dev_attr_fnode_def_time2retain.attr) { + param = ISCSI_FLASHNODE_DEF_TIME2RETAIN; + } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) { + param = ISCSI_FLASHNODE_MAX_R2T; + } else if (attr == &dev_attr_fnode_isid.attr) { + param = ISCSI_FLASHNODE_ISID; + } else if (attr == &dev_attr_fnode_tsid.attr) { + param = ISCSI_FLASHNODE_TSID; + } else if (attr == &dev_attr_fnode_max_burst_len.attr) { + 
param = ISCSI_FLASHNODE_MAX_BURST; + } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) { + param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO; + } else if (attr == &dev_attr_fnode_targetalias.attr) { + param = ISCSI_FLASHNODE_ALIAS; + } else if (attr == &dev_attr_fnode_targetname.attr) { + param = ISCSI_FLASHNODE_NAME; + } else if (attr == &dev_attr_fnode_tpgt.attr) { + param = ISCSI_FLASHNODE_TPGT; + } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX; + } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE; + } else if (attr == &dev_attr_fnode_chap_in_idx.attr) { + param = ISCSI_FLASHNODE_CHAP_IN_IDX; + } else if (attr == &dev_attr_fnode_chap_out_idx.attr) { + param = ISCSI_FLASHNODE_CHAP_OUT_IDX; + } else if (attr == &dev_attr_fnode_username.attr) { + param = ISCSI_FLASHNODE_USERNAME; + } else if (attr == &dev_attr_fnode_username_in.attr) { + param = ISCSI_FLASHNODE_USERNAME_IN; + } else if (attr == &dev_attr_fnode_password.attr) { + param = ISCSI_FLASHNODE_PASSWORD; + } else if (attr == &dev_attr_fnode_password_in.attr) { + param = ISCSI_FLASHNODE_PASSWORD_IN; + } else if (attr == &dev_attr_fnode_is_boot_target.attr) { + param = ISCSI_FLASHNODE_IS_BOOT_TGT; + } else { + WARN_ONCE(1, "Invalid flashnode session attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); +} + +static struct attribute_group iscsi_flashnode_sess_attr_group = { + .attrs = iscsi_flashnode_sess_attrs, + .is_visible = iscsi_flashnode_sess_attr_is_visible, +}; + +static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = { + &iscsi_flashnode_sess_attr_group, + NULL, +}; + +static void iscsi_flashnode_sess_release(struct device *dev) +{ + struct iscsi_bus_flash_session *fnode_sess = + iscsi_dev_to_flash_session(dev); + + kfree(fnode_sess->targetname); + kfree(fnode_sess->targetalias); + kfree(fnode_sess->portal_type); + kfree(fnode_sess); +} + +struct device_type iscsi_flashnode_sess_dev_type = { + .name = "iscsi_flashnode_sess_dev_type", + .groups = iscsi_flashnode_sess_attr_groups, + .release = iscsi_flashnode_sess_release, +}; + +/* flash node connection attrs show */ +#define iscsi_flashnode_conn_attr_show(type, name, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\ + struct iscsi_bus_flash_session *fnode_sess = \ + iscsi_flash_conn_to_flash_session(fnode_conn);\ + struct iscsi_transport *t = fnode_conn->transport; \ + return t->get_flashnode_param(fnode_sess, param, buf); \ +} \ + + +#define iscsi_flashnode_conn_attr(type, name, param) \ + iscsi_flashnode_conn_attr_show(type, name, param) \ +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ + show_##type##_##name, NULL); + +/* Flash node connection attributes */ + +iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6, + ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6); +iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN); +iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN); +iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN); +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat, + ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT); +iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable, + ISCSI_FLASHNODE_TCP_NAGLE_DISABLE); +iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable, + ISCSI_FLASHNODE_TCP_WSF_DISABLE); 
+iscsi_flashnode_conn_attr(fnode, tcp_timer_scale, + ISCSI_FLASHNODE_TCP_TIMER_SCALE); +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable, + ISCSI_FLASHNODE_TCP_TIMESTAMP_EN); +iscsi_flashnode_conn_attr(fnode, fragment_disable, + ISCSI_FLASHNODE_IP_FRAG_DISABLE); +iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO); +iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT); +iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR); +iscsi_flashnode_conn_attr(fnode, max_recv_dlength, + ISCSI_FLASHNODE_MAX_RECV_DLENGTH); +iscsi_flashnode_conn_attr(fnode, max_xmit_dlength, + ISCSI_FLASHNODE_MAX_XMIT_DLENGTH); +iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT); +iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS); +iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC); +iscsi_flashnode_conn_attr(fnode, ipv6_flow_label, + ISCSI_FLASHNODE_IPV6_FLOW_LABEL); +iscsi_flashnode_conn_attr(fnode, redirect_ipaddr, + ISCSI_FLASHNODE_REDIRECT_IPADDR); +iscsi_flashnode_conn_attr(fnode, max_segment_size, + ISCSI_FLASHNODE_MAX_SEGMENT_SIZE); +iscsi_flashnode_conn_attr(fnode, link_local_ipv6, + ISCSI_FLASHNODE_LINK_LOCAL_IPV6); +iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF); +iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF); +iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN); +iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN); + +static struct attribute *iscsi_flashnode_conn_attrs[] = { + &dev_attr_fnode_is_fw_assigned_ipv6.attr, + &dev_attr_fnode_header_digest.attr, + &dev_attr_fnode_data_digest.attr, + &dev_attr_fnode_snack_req.attr, + &dev_attr_fnode_tcp_timestamp_stat.attr, + &dev_attr_fnode_tcp_nagle_disable.attr, + &dev_attr_fnode_tcp_wsf_disable.attr, + &dev_attr_fnode_tcp_timer_scale.attr, + &dev_attr_fnode_tcp_timestamp_enable.attr, + &dev_attr_fnode_fragment_disable.attr, + &dev_attr_fnode_max_recv_dlength.attr, + &dev_attr_fnode_max_xmit_dlength.attr, + &dev_attr_fnode_keepalive_tmo.attr, + &dev_attr_fnode_port.attr, + &dev_attr_fnode_ipaddress.attr, + &dev_attr_fnode_redirect_ipaddr.attr, + &dev_attr_fnode_max_segment_size.attr, + &dev_attr_fnode_local_port.attr, + &dev_attr_fnode_ipv4_tos.attr, + &dev_attr_fnode_ipv6_traffic_class.attr, + &dev_attr_fnode_ipv6_flow_label.attr, + &dev_attr_fnode_link_local_ipv6.attr, + &dev_attr_fnode_tcp_xmit_wsf.attr, + &dev_attr_fnode_tcp_recv_wsf.attr, + &dev_attr_fnode_statsn.attr, + &dev_attr_fnode_exp_statsn.attr, + NULL, +}; + +static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); + struct iscsi_transport *t = fnode_conn->transport; + int param; + + if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) { + param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6; + } else if (attr == &dev_attr_fnode_header_digest.attr) { + param = ISCSI_FLASHNODE_HDR_DGST_EN; + } else if (attr == &dev_attr_fnode_data_digest.attr) { + param = ISCSI_FLASHNODE_DATA_DGST_EN; + } else if (attr == &dev_attr_fnode_snack_req.attr) { + param = ISCSI_FLASHNODE_SNACK_REQ_EN; + } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) { + param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT; + } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) { + param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE; + } else if 
(attr == &dev_attr_fnode_tcp_wsf_disable.attr) { + param = ISCSI_FLASHNODE_TCP_WSF_DISABLE; + } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) { + param = ISCSI_FLASHNODE_TCP_TIMER_SCALE; + } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) { + param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN; + } else if (attr == &dev_attr_fnode_fragment_disable.attr) { + param = ISCSI_FLASHNODE_IP_FRAG_DISABLE; + } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) { + param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH; + } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) { + param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH; + } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) { + param = ISCSI_FLASHNODE_KEEPALIVE_TMO; + } else if (attr == &dev_attr_fnode_port.attr) { + param = ISCSI_FLASHNODE_PORT; + } else if (attr == &dev_attr_fnode_ipaddress.attr) { + param = ISCSI_FLASHNODE_IPADDR; + } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) { + param = ISCSI_FLASHNODE_REDIRECT_IPADDR; + } else if (attr == &dev_attr_fnode_max_segment_size.attr) { + param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE; + } else if (attr == &dev_attr_fnode_local_port.attr) { + param = ISCSI_FLASHNODE_LOCAL_PORT; + } else if (attr == &dev_attr_fnode_ipv4_tos.attr) { + param = ISCSI_FLASHNODE_IPV4_TOS; + } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) { + param = ISCSI_FLASHNODE_IPV6_TC; + } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) { + param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL; + } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) { + param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6; + } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) { + param = ISCSI_FLASHNODE_TCP_XMIT_WSF; + } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) { + param = ISCSI_FLASHNODE_TCP_RECV_WSF; + } else if (attr == &dev_attr_fnode_statsn.attr) { + param = ISCSI_FLASHNODE_STATSN; + } else if (attr == &dev_attr_fnode_exp_statsn.attr) { + param = ISCSI_FLASHNODE_EXP_STATSN; + } else { + WARN_ONCE(1, "Invalid flashnode connection attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); +} + +static struct attribute_group iscsi_flashnode_conn_attr_group = { + .attrs = iscsi_flashnode_conn_attrs, + .is_visible = iscsi_flashnode_conn_attr_is_visible, +}; + +static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = { + &iscsi_flashnode_conn_attr_group, + NULL, +}; + +static void iscsi_flashnode_conn_release(struct device *dev) +{ + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); + + kfree(fnode_conn->ipaddress); + kfree(fnode_conn->redirect_ipaddr); + kfree(fnode_conn->link_local_ipv6_addr); + kfree(fnode_conn); +} + +struct device_type iscsi_flashnode_conn_dev_type = { + .name = "iscsi_flashnode_conn_dev_type", + .groups = iscsi_flashnode_conn_attr_groups, + .release = iscsi_flashnode_conn_release, +}; + +struct bus_type iscsi_flashnode_bus; + +int iscsi_flashnode_bus_match(struct device *dev, + struct device_driver *drv) +{ + if (dev->bus == &iscsi_flashnode_bus) + return 1; + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); + +struct bus_type iscsi_flashnode_bus = { + .name = "iscsi_flashnode", + .match = &iscsi_flashnode_bus_match, +}; + +/** + * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs + * @shost: pointer to host data + * @index: index of flashnode to add in sysfs + * @transport: pointer to transport data + * @dd_size: total size to allocate + * + * Adds a sysfs entry for the flashnode session attributes + * + * Returns: + 
* pointer to allocated flashnode sess on success + * %NULL on failure + */ +struct iscsi_bus_flash_session * +iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, + struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_bus_flash_session *fnode_sess; + int err; + + fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); + if (!fnode_sess) + return NULL; + + fnode_sess->transport = transport; + fnode_sess->target_id = index; + fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; + fnode_sess->dev.bus = &iscsi_flashnode_bus; + fnode_sess->dev.parent = &shost->shost_gendev; + dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", + shost->host_no, index); + + err = device_register(&fnode_sess->dev); + if (err) + goto free_fnode_sess; + + if (dd_size) + fnode_sess->dd_data = &fnode_sess[1]; + + return fnode_sess; + +free_fnode_sess: + kfree(fnode_sess); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); + +/** + * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs + * @shost: pointer to host data + * @fnode_sess: pointer to the parent flashnode session entry + * @transport: pointer to transport data + * @dd_size: total size to allocate + * + * Adds a sysfs entry for the flashnode connection attributes + * + * Returns: + * pointer to allocated flashnode conn on success + * %NULL on failure + */ +struct iscsi_bus_flash_conn * +iscsi_create_flashnode_conn(struct Scsi_Host *shost, + struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_bus_flash_conn *fnode_conn; + int err; + + fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL); + if (!fnode_conn) + return NULL; + + fnode_conn->transport = transport; + fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type; + fnode_conn->dev.bus = &iscsi_flashnode_bus; + fnode_conn->dev.parent = &fnode_sess->dev; + dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0", + shost->host_no, fnode_sess->target_id); + + err = device_register(&fnode_conn->dev); + if (err) + goto free_fnode_conn; + + if (dd_size) + fnode_conn->dd_data = &fnode_conn[1]; + + return fnode_conn; + +free_fnode_conn: + kfree(fnode_conn); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); + +/** + * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn + * @dev: device to verify + * @data: pointer to data containing value to use for verification + * + * Verifies if the passed device is flashnode conn device + * + * Returns: + * 1 on success + * 0 on failure + */ +int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) +{ + return dev->bus == &iscsi_flashnode_bus; +} +EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev); + +static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) +{ + device_unregister(&fnode_conn->dev); + return 0; +} + +static int flashnode_match_index(struct device *dev, void *data) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + int ret = 0; + + if (!iscsi_flashnode_bus_match(dev, NULL)) + goto exit_match_index; + + fnode_sess = iscsi_dev_to_flash_session(dev); + ret = (fnode_sess->target_id == *((int *)data)) ?
1 : 0; + +exit_match_index: + return ret; +} + +/** + * iscsi_get_flashnode_by_index - finds flashnode session entry by index + * @shost: pointer to host data + * @data: pointer to data containing value to use for comparison + * @fn: function pointer that does actual comparison + * + * Finds the flashnode session object for the passed index + * + * Returns: + * pointer to found flashnode session object on success + * %NULL on failure + */ +static struct iscsi_bus_flash_session * +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + struct device *dev; + + dev = device_find_child(&shost->shost_gendev, data, fn); + if (dev) + fnode_sess = iscsi_dev_to_flash_session(dev); + + return fnode_sess; +} + +/** + * iscsi_find_flashnode_sess - finds flashnode session entry + * @shost: pointer to host data + * @data: pointer to data containing value to use for comparison + * @fn: function pointer that does actual comparison + * + * Finds the flashnode session object comparing the data passed using logic + * defined in passed function pointer + * + * Returns: + * pointer to found flashnode session device object on success + * %NULL on failure + */ +struct device * +iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct device *dev; + + dev = device_find_child(&shost->shost_gendev, data, fn); + return dev; +} +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); + +/** + * iscsi_find_flashnode_conn - finds flashnode connection entry + * @fnode_sess: pointer to parent flashnode session entry + * @data: pointer to data containing value to use for comparison + * @fn: function pointer that does actual comparison + * + * Finds the flashnode connection object comparing the data passed using logic + * defined in passed function pointer + * + * Returns: + * pointer to found flashnode connection device object on success + * %NULL on failure + */ +struct device * +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, + void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct device *dev; + + dev = device_find_child(&fnode_sess->dev, data, fn); + return dev; +} +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); + +static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data) +{ + if (!iscsi_is_flashnode_conn_dev(dev, NULL)) + return 0; + + return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev)); +} + +/** + * iscsi_destroy_flashnode_sess - destroy flashnode session entry + * @fnode_sess: pointer to flashnode session entry to be destroyed + * + * Deletes the flashnode session entry and all children flashnode connection + * entries from sysfs + */ +void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess) +{ + int err; + + err = device_for_each_child(&fnode_sess->dev, NULL, + iscsi_iter_destroy_flashnode_conn_fn); + if (err) + pr_err("Could not delete all connections for %s.
Error %d.\n", + fnode_sess->dev.kobj.name, err); + + device_unregister(&fnode_sess->dev); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess); + +static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data) +{ + if (!iscsi_flashnode_bus_match(dev, NULL)) + return 0; + + iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev)); + return 0; +} + +/** + * iscsi_destroy_all_flashnode - destory all flashnode session entries + * @shost: pointer to host data + * + * Destroys all the flashnode session entries and all corresponding children + * flashnode connection entries from sysfs + */ +void iscsi_destroy_all_flashnode(struct Scsi_Host *shost) +{ + device_for_each_child(&shost->shost_gendev, NULL, + iscsi_iter_destroy_flashnode_fn); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode); + +/* * BSG support */ /** @@@ -2028,8 -1344,8 +2028,8 @@@ int iscsi_recv_pdu(struct iscsi_cls_con struct iscsi_uevent *ev; char *pdu; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + - data_size); + int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) + + data_size);
priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@@ -2044,7 -1360,7 +2044,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_RECV_PDU; @@@ -2065,7 -1381,7 +2065,7 @@@ int iscsi_offload_mesg(struct Scsi_Hos struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - int len = NLMSG_SPACE(sizeof(*ev) + data_size); + int len = nlmsg_total_size(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { @@@ -2074,7 -1390,7 +2074,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->type = type; ev->transport_handle = iscsi_handle(transport); @@@ -2099,7 -1415,7 +2099,7 @@@ void iscsi_conn_error_event(struct iscs struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev)); + int len = nlmsg_total_size(sizeof(*ev));
priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@@ -2113,7 -1429,7 +2113,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; ev->r.connerror.error = error; @@@ -2134,7 -1450,7 +2134,7 @@@ void iscsi_conn_login_event(struct iscs struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev)); + int len = nlmsg_total_size(sizeof(*ev));
priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@@ -2148,7 -1464,7 +2148,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; ev->r.conn_login.state = state; @@@ -2168,7 -1484,7 +2168,7 @@@ void iscsi_post_host_event(uint32_t hos struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - int len = NLMSG_SPACE(sizeof(*ev) + data_size); + int len = nlmsg_total_size(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_NOIO); if (!skb) { @@@ -2178,7 -1494,7 +2178,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_HOST_EVENT; ev->r.host_event.host_no = host_no; @@@ -2199,7 -1515,7 +2199,7 @@@ void iscsi_ping_comp_event(uint32_t hos struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - int len = NLMSG_SPACE(sizeof(*ev) + data_size); + int len = nlmsg_total_size(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_NOIO); if (!skb) { @@@ -2208,7 -1524,7 +2208,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_PING_COMP; ev->r.ping_comp.host_no = host_no; @@@ -2227,7 -1543,7 +2227,7 @@@ iscsi_if_send_reply(uint32_t group, in { struct sk_buff *skb; struct nlmsghdr *nlh; - int len = NLMSG_SPACE(size); + int len = nlmsg_total_size(size); int flags = multi ? NLM_F_MULTI : 0; int t = done ? NLMSG_DONE : type;
@@@ -2239,24 -1555,24 +2239,24 @@@
nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); nlh->nlmsg_flags = flags; - memcpy(NLMSG_DATA(nlh), payload, size); + memcpy(nlmsg_data(nlh), payload, size); return iscsi_multicast_skb(skb, group, GFP_ATOMIC); }
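All of the NLMSG_SPACE() to nlmsg_total_size() conversions in this file compute the same quantity: the netlink header plus the payload, rounded up to the 4-byte netlink alignment. The arithmetic can be checked from userspace with the uapi macros:

/* Netlink message sizing: header plus payload, rounded to alignment.
 * Both the old NLMSG_SPACE(len) and the kernel's nlmsg_total_size()
 * come to the same value. */
#include <linux/netlink.h>
#include <stdio.h>

static int total_size(int payload)
{
        return NLMSG_ALIGN(NLMSG_HDRLEN + payload);
}

int main(void)
{
        printf("header = %d bytes\n", NLMSG_HDRLEN);
        printf("total for 13-byte payload = %d\n", total_size(13));
        printf("NLMSG_SPACE(13) = %d\n", NLMSG_SPACE(13));      /* same */
        return 0;
}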
static int iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { - struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_stats *stats; struct sk_buff *skbstat; struct iscsi_cls_conn *conn; struct nlmsghdr *nlhstat; struct iscsi_uevent *evstat; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev) + - sizeof(struct iscsi_stats) + - sizeof(struct iscsi_stats_custom) * - ISCSI_STATS_CUSTOM_MAX); + int len = nlmsg_total_size(sizeof(*ev) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + ISCSI_STATS_CUSTOM_MAX); int err = 0;
priv = iscsi_if_transport_lookup(transport); @@@ -2279,7 -1595,7 +2279,7 @@@
nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); - evstat = NLMSG_DATA(nlhstat); + evstat = nlmsg_data(nlhstat); memset(evstat, 0, sizeof(*evstat)); evstat->transport_handle = iscsi_handle(conn->transport); evstat->type = nlh->nlmsg_type; @@@ -2292,12 -1608,12 +2292,12 @@@ memset(stats, 0, sizeof(*stats));
transport->get_stats(conn, stats); - actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + - sizeof(struct iscsi_stats) + - sizeof(struct iscsi_stats_custom) * - stats->custom_length); + actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + stats->custom_length); actual_size -= sizeof(*nlhstat); - actual_size = NLMSG_LENGTH(actual_size); + actual_size = nlmsg_msg_size(actual_size); skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size;
@@@ -2321,7 -1637,7 +2321,7 @@@ int iscsi_session_event(struct iscsi_cl struct iscsi_uevent *ev; struct sk_buff *skb; struct nlmsghdr *nlh; - int rc, len = NLMSG_SPACE(sizeof(*ev)); + int rc, len = nlmsg_total_size(sizeof(*ev));
priv = iscsi_if_transport_lookup(session->transport); if (!priv) @@@ -2337,7 -1653,7 +2337,7 @@@ }
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(session->transport);
ev->type = event; @@@ -2689,7 -2005,7 +2689,7 @@@ iscsi_send_ping(struct iscsi_transport static int iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) { - struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_chap_rec *chap_rec; struct iscsi_internal *priv; @@@ -2708,7 -2024,7 +2708,7 @@@ return -EINVAL;
chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); - len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); + len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
shost = scsi_host_lookup(ev->u.get_chap.host_no); if (!shost) { @@@ -2729,7 -2045,7 +2729,7 @@@
nlhchap = __nlmsg_put(skbchap, 0, 0, 0, (len - sizeof(*nlhchap)), 0); - evchap = NLMSG_DATA(nlhchap); + evchap = nlmsg_data(nlhchap); memset(evchap, 0, sizeof(*evchap)); evchap->transport_handle = iscsi_handle(transport); evchap->type = nlh->nlmsg_type; @@@ -2742,7 -2058,7 +2742,7 @@@ err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, &evchap->u.get_chap.num_entries, buf);
- actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); + actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size); skb_trim(skbchap, NLMSG_ALIGN(actual_size)); nlhchap->nlmsg_len = actual_size;
@@@ -2776,299 -2092,11 +2776,299 @@@ static int iscsi_delete_chap(struct isc return err; }
+static const struct { + enum iscsi_discovery_parent_type value; + char *name; +} iscsi_discovery_parent_names[] = { + {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" }, + {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" }, + {ISCSI_DISC_PARENT_ISNS, "isns" }, +}; + +char *iscsi_get_discovery_parent_name(int parent_type) +{ + int i; + char *state = "Unknown!"; + + for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) { + if (iscsi_discovery_parent_names[i].value & parent_type) { + state = iscsi_discovery_parent_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name); + +static int iscsi_set_flashnode_param(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t *idx; + int err = 0; + + if (!transport->set_flashnode_param) { + err = -ENOSYS; + goto exit_set_fnode; + } + + shost = scsi_host_lookup(ev->u.set_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = &ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx, + flashnode_match_index); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, *idx, ev->u.set_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess, NULL, + iscsi_is_flashnode_conn_dev); + if (!dev) { + err = -ENODEV; + goto put_host; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + +put_host: + scsi_host_put(shost); + +exit_set_fnode: + return err; +} + +static int iscsi_new_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int index; + int err = 0; + + if (!transport->new_flashnode) { + err = -ENOSYS; + goto exit_new_fnode; + } + + shost = scsi_host_lookup(ev->u.new_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.new_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + index = transport->new_flashnode(shost, data, len); + + if (index >= 0) + ev->r.new_flashnode_ret.flashnode_idx = index; + else + err = -EIO; + +put_host: + scsi_host_put(shost); + +exit_new_fnode: + return err; +} + +static int iscsi_del_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + uint32_t *idx; + int err = 0; + + if (!transport->del_flashnode) { + err = -ENOSYS; + goto exit_del_fnode; + } + + shost = scsi_host_lookup(ev->u.del_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.del_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = &ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx, + flashnode_match_index); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, *idx, ev->u.del_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + err = transport->del_flashnode(fnode_sess); + +put_host: + scsi_host_put(shost); + +exit_del_fnode: + return err; +} + +static int iscsi_login_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent 
*ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t *idx; + int err = 0; + + if (!transport->login_flashnode) { + err = -ENOSYS; + goto exit_login_fnode; + } + + shost = scsi_host_lookup(ev->u.login_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.login_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = &ev->u.login_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx, + flashnode_match_index); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, *idx, ev->u.login_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess, NULL, + iscsi_is_flashnode_conn_dev); + if (!dev) { + err = -ENODEV; + goto put_host; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + err = transport->login_flashnode(fnode_sess, fnode_conn); + +put_host: + scsi_host_put(shost); + +exit_login_fnode: + return err; +} + +static int iscsi_logout_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t *idx; + int err = 0; + + if (!transport->logout_flashnode) { + err = -ENOSYS; + goto exit_logout_fnode; + } + + shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + idx = &ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx, + flashnode_match_index); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, *idx, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess, NULL, + iscsi_is_flashnode_conn_dev); + if (!dev) { + err = -ENODEV; + goto put_host; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + + err = transport->logout_flashnode(fnode_sess, fnode_conn); + +put_host: + scsi_host_put(shost); + +exit_logout_fnode: + return err; +} + +static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *session; + int err = 0; + + if (!transport->logout_flashnode_sid) { + err = -ENOSYS; + goto exit_logout_sid; + } + + shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.logout_flashnode_sid.host_no); + err = -ENODEV; + goto put_host; + } + + session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); + if (!session) { + pr_err("%s could not find session id %u\n", + __func__, ev->u.logout_flashnode_sid.sid); + err = -EINVAL; + goto put_host; + } + + err = transport->logout_flashnode_sid(session); + +put_host: + scsi_host_put(shost); + +exit_logout_sid: + return err; +} + static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; - struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; @@@ -3218,27 -2246,6 +3218,27 @@@ case ISCSI_UEVENT_DELETE_CHAP: err = iscsi_delete_chap(transport, ev); break; + case ISCSI_UEVENT_SET_FLASHNODE_PARAMS: + 
err = iscsi_set_flashnode_param(transport, ev, + nlmsg_attrlen(nlh, + sizeof(*ev))); + break; + case ISCSI_UEVENT_NEW_FLASHNODE: + err = iscsi_new_flashnode(transport, ev, + nlmsg_attrlen(nlh, sizeof(*ev))); + break; + case ISCSI_UEVENT_DEL_FLASHNODE: + err = iscsi_del_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGIN_FLASHNODE: + err = iscsi_login_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGOUT_FLASHNODE: + err = iscsi_logout_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: + err = iscsi_logout_flashnode_sid(transport, ev); + break; default: err = -ENOSYS; break; @@@ -3256,7 -2263,7 +3256,7 @@@ static voi iscsi_if_rx(struct sk_buff *skb) { mutex_lock(&rx_queue_mutex); - while (skb->len >= NLMSG_SPACE(0)) { + while (skb->len >= NLMSG_HDRLEN) { int err; uint32_t rlen; struct nlmsghdr *nlh; @@@ -3269,7 -2276,7 +3269,7 @@@ break; }
- ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; @@@ -3974,14 -2981,10 +3974,14 @@@ static __init int iscsi_transport_init( if (err) goto unregister_conn_class;
+ err = bus_register(&iscsi_flashnode_bus); + if (err) + goto unregister_session_class; + nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg); if (!nls) { err = -ENOBUFS; - goto unregister_session_class; + goto unregister_flashnode_bus; }
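The init hunks above are the standard goto-unwind ladder used throughout kernel init code: registering the flashnode bus adds one rung, and any later failure (here netlink_kernel_create()) must jump to the label that tears down everything registered before it, in reverse order. A minimal userspace sketch of the pattern, with hypothetical register_a()/register_b() names rather than the real iscsi calls:

	#include <stdio.h>

	static int register_a(void) { puts("a up"); return 0; }
	static void unregister_a(void) { puts("a down"); }
	static int register_b(void) { puts("b up"); return 0; }
	static void unregister_b(void) { puts("b down"); }
	static int register_c(void) { puts("c failed"); return -1; }

	static int demo_init(void)
	{
		int err;

		err = register_a();
		if (err)
			goto out;
		err = register_b();	/* the new rung slots in here */
		if (err)
			goto unreg_a;
		err = register_c();
		if (err)
			goto unreg_b;	/* failures after it land here */
		return 0;

	unreg_b:
		unregister_b();
	unreg_a:
		unregister_a();
	out:
		return err;
	}

	int main(void) { return demo_init() ? 1 : 0; }

Running it prints "a up", "b up", "c failed", then "b down" and "a down": exactly the reverse-order cleanup the new unregister_flashnode_bus label guarantees ahead of the session class.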
iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); @@@ -3992,8 -2995,6 +3992,8 @@@
release_nls: netlink_kernel_release(nls); +unregister_flashnode_bus: + bus_unregister(&iscsi_flashnode_bus); unregister_session_class: transport_class_unregister(&iscsi_session_class); unregister_conn_class: @@@ -4013,7 -3014,6 +4013,7 @@@ static void __exit iscsi_transport_exit { destroy_workqueue(iscsi_eh_timer_workq); netlink_kernel_release(nls); + bus_unregister(&iscsi_flashnode_bus); transport_class_unregister(&iscsi_connection_class); transport_class_unregister(&iscsi_session_class); transport_class_unregister(&iscsi_host_class); diff --combined drivers/ssb/driver_chipcommon_pmu.c index 7b0bce9,791da2c..23c5dbf --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@@ -110,8 -110,8 +110,8 @@@ static void ssb_pmu0_pllinit_r0(struct return; }
- ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", - (crystalfreq / 1000), (crystalfreq % 1000)); + ssb_info("Programming PLL to %u.%03u MHz\n", + crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */ switch (bus->chip_id) { @@@ -138,7 -138,7 +138,7 @@@ } tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) - ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); + ssb_emerg("Failed to turn the PLL off!\n");
/* Set PDIV in PLL control 0. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0); @@@ -249,8 -249,8 +249,8 @@@ static void ssb_pmu1_pllinit_r0(struct return; }
- ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", - (crystalfreq / 1000), (crystalfreq % 1000)); + ssb_info("Programming PLL to %u.%03u MHz\n", + crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */ switch (bus->chip_id) { @@@ -275,7 -275,7 +275,7 @@@ } tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) - ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); + ssb_emerg("Failed to turn the PLL off!\n");
/* Set p1div and p2div. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0); @@@ -349,9 -349,8 +349,8 @@@ static void ssb_pmu_pll_init(struct ssb case 43222: break; default: - ssb_printk(KERN_ERR PFX - "ERROR: PLL init unknown for device %04X\n", - bus->chip_id); + ssb_err("ERROR: PLL init unknown for device %04X\n", + bus->chip_id); } }
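The ssb_printk(KERN_ERR PFX ...) to ssb_err() conversions in this file trade repeated level-plus-prefix boilerplate for per-driver logging helpers, which also makes multi-line messages harder to get wrong. The real wrappers are printk-based and live in the driver's private header; the sketch below only mimics the shape in userspace C (the "ssb: " prefix is an assumption):

	#include <stdio.h>

	#define PFX "ssb: "	/* assumed prefix */

	#define ssb_log(lvl, fmt, ...) \
		fprintf(stderr, "<%s> " PFX fmt, lvl, ##__VA_ARGS__)

	#define ssb_err(fmt, ...)	ssb_log("err", fmt, ##__VA_ARGS__)
	#define ssb_info(fmt, ...)	ssb_log("info", fmt, ##__VA_ARGS__)
	#define ssb_dbg(fmt, ...)	ssb_log("debug", fmt, ##__VA_ARGS__)

	int main(void)
	{
		ssb_err("ERROR: PLL init unknown for device %04X\n", 0x4322);
		return 0;
	}

Note the still-unconverted ssb_printk() call in the new ssb_pmu_spuravoid_pllupdate() further down; it came in through the other parent of the merge and is an obvious follow-up candidate for the same conversion.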
@@@ -472,9 -471,8 +471,8 @@@ static void ssb_pmu_resources_init(stru max_msk = 0xFFFFF; break; default: - ssb_printk(KERN_ERR PFX - "ERROR: PMU resource config unknown for device %04X\n", - bus->chip_id); + ssb_err("ERROR: PMU resource config unknown for device %04X\n", + bus->chip_id); }
if (updown_tab) { @@@ -526,8 -524,8 +524,8 @@@ void ssb_pmu_init(struct ssb_chipcommo pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP); cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
- ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n", - cc->pmu.rev, pmucap); + ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n", + cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1) chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, @@@ -638,9 -636,8 +636,8 @@@ u32 ssb_pmu_get_alp_clock(struct ssb_ch case 0x5354: ssb_pmu_get_alp_clock_clk0(cc); default: - ssb_printk(KERN_ERR PFX - "ERROR: PMU alp clock unknown for device %04X\n", - bus->chip_id); + ssb_err("ERROR: PMU alp clock unknown for device %04X\n", + bus->chip_id); return 0; } } @@@ -654,9 -651,8 +651,8 @@@ u32 ssb_pmu_get_cpu_clock(struct ssb_ch /* 5354 chip uses a non programmable PLL of frequency 240MHz */ return 240000000; default: - ssb_printk(KERN_ERR PFX - "ERROR: PMU cpu clock unknown for device %04X\n", - bus->chip_id); + ssb_err("ERROR: PMU cpu clock unknown for device %04X\n", + bus->chip_id); return 0; } } @@@ -669,38 -665,8 +665,37 @@@ u32 ssb_pmu_get_controlclock(struct ssb case 0x5354: return 120000000; default: - ssb_printk(KERN_ERR PFX - "ERROR: PMU controlclock unknown for device %04X\n", - bus->chip_id); + ssb_err("ERROR: PMU controlclock unknown for device %04X\n", + bus->chip_id); return 0; } } + +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) +{ + u32 pmu_ctl = 0; + + switch (cc->dev->bus->chip_id) { + case 0x4322: + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); + if (spuravoid == 1) + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); + else + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); + pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; + break; + case 43222: + /* TODO: BCM43222 requires updating PLLs too */ + return; + default: + ssb_printk(KERN_ERR PFX + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", + cc->dev->bus->chip_id); + return; + } + + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); +} +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); diff --combined include/net/bluetooth/bluetooth.h index ea81f0e,ed6e955..4c0c7b0 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@@ -226,12 -226,13 +226,12 @@@ struct bt_sock_list struct hlist_head head; rwlock_t lock; #ifdef CONFIG_PROC_FS - struct file_operations fops; int (* custom_seq_show)(struct seq_file *, void *); #endif };
int bt_sock_register(int proto, const struct net_proto_family *ops); - int bt_sock_unregister(int proto); + void bt_sock_unregister(int proto); void bt_sock_link(struct bt_sock_list *l, struct sock *s); void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, @@@ -259,12 -260,22 +259,22 @@@ struct l2cap_ctrl __u8 retries; };
+ struct hci_dev; + + typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status); + + struct hci_req_ctrl { + bool start; + hci_req_complete_t complete; + }; + struct bt_skb_cb { __u8 pkt_type; __u8 incoming; __u16 expect; __u8 force_active; struct l2cap_ctrl control; + struct hci_req_ctrl req; }; #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
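bt_cb() and the new hci_req_ctrl member follow the usual kernel trick of overlaying a typed control block on the fixed 48-byte skb->cb scratch area that each layer may reuse while it owns the skb. A self-contained mock of the pattern (fake_skb stands in for struct sk_buff; the kernel enforces the size invariant with BUILD_BUG_ON rather than a runtime assert):

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	struct fake_skb {
		char cb[48];		/* mirrors sk_buff::cb */
	};

	struct demo_skb_cb {		/* shaped like bt_skb_cb */
		unsigned char pkt_type;
		unsigned char incoming;
		unsigned short expect;
		unsigned char force_active;
	};

	#define demo_cb(skb) ((struct demo_skb_cb *)((skb)->cb))

	int main(void)
	{
		struct fake_skb skb;

		/* the overlay must never outgrow the scratch area */
		assert(sizeof(struct demo_skb_cb) <= sizeof(skb.cb));

		memset(&skb, 0, sizeof(skb));
		demo_cb(&skb)->pkt_type = 0x02;
		printf("pkt_type %u\n", demo_cb(&skb)->pkt_type);
		return 0;
	}

The batman-adv side of this merge adds BATADV_SKB_CB() in main.h for exactly the same purpose.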
@@@ -318,7 -329,7 +328,7 @@@ extern void hci_sock_cleanup(void) extern int bt_sysfs_init(void); extern void bt_sysfs_cleanup(void);
-extern int bt_procfs_init(struct module* module, struct net *net, const char *name, +extern int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list* sk_list, int (* seq_show)(struct seq_file *, void *)); extern void bt_procfs_cleanup(struct net *net, const char *name); diff --combined include/net/scm.h index b117081,5a4c6a9..8de2d37 --- a/include/net/scm.h +++ b/include/net/scm.h @@@ -26,7 -26,6 +26,6 @@@ struct scm_fp_list
struct scm_cookie { struct pid *pid; /* Skb credentials */ - const struct cred *cred; struct scm_fp_list *fp; /* Passed files */ struct scm_creds creds; /* Skb credentials */ #ifdef CONFIG_SECURITY_NETWORK @@@ -51,23 -50,18 +50,18 @@@ static __inline__ void unix_get_peersec #endif /* CONFIG_SECURITY_NETWORK */
static __inline__ void scm_set_cred(struct scm_cookie *scm, - struct pid *pid, const struct cred *cred) + struct pid *pid, kuid_t uid, kgid_t gid) { scm->pid = get_pid(pid); - scm->cred = cred ? get_cred(cred) : NULL; scm->creds.pid = pid_vnr(pid); - scm->creds.uid = cred ? cred->uid : INVALID_UID; - scm->creds.gid = cred ? cred->gid : INVALID_GID; + scm->creds.uid = uid; + scm->creds.gid = gid; }
static __inline__ void scm_destroy_cred(struct scm_cookie *scm) { put_pid(scm->pid); scm->pid = NULL; - - if (scm->cred) - put_cred(scm->cred); - scm->cred = NULL; }
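With the cached struct cred pointer gone, the cookie carries plain kuid_t/kgid_t values and the refcount juggling in scm_destroy_cred() disappears. Consumers translate those kernel-internal ids into namespace-relative ones when reporting them; a hedged sketch using the standard linux/uidgid.h helpers (scm_creds_to_user is a hypothetical helper name, not an existing function):

	#include <linux/uidgid.h>
	#include <net/scm.h>

	/* Hypothetical: extract userspace-visible ids from a cookie, failing
	 * when scm_send() left the INVALID_UID/INVALID_GID defaults in place.
	 */
	static bool scm_creds_to_user(const struct scm_cookie *scm,
				      struct user_namespace *ns,
				      uid_t *uid, gid_t *gid)
	{
		if (!uid_valid(scm->creds.uid) || !gid_valid(scm->creds.gid))
			return false;
		*uid = from_kuid_munged(ns, scm->creds.uid);
		*gid = from_kgid_munged(ns, scm->creds.gid);
		return true;
	}

The INVALID_UID/INVALID_GID defaults referenced here are set up by the scm_send() hunk just below.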
static __inline__ void scm_destroy(struct scm_cookie *scm) @@@ -81,8 -75,10 +75,10 @@@ static __inline__ int scm_send(struct s struct scm_cookie *scm, bool forcecreds) { memset(scm, 0, sizeof(*scm)); + scm->creds.uid = INVALID_UID; + scm->creds.gid = INVALID_GID; if (forcecreds) - scm_set_cred(scm, task_tgid(current), current_cred()); - scm_set_cred(scm, task_tgid(current), current_euid(), current_egid()); ++ scm_set_cred(scm, task_tgid(current), current_uid(), current_gid()); unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) return 0; diff --combined kernel/signal.c index 598dc06,497330e..06ff776 --- a/kernel/signal.c +++ b/kernel/signal.c @@@ -32,6 -32,7 +32,7 @@@ #include <linux/user_namespace.h> #include <linux/uprobes.h> #include <linux/compat.h> + #include <linux/cn_proc.h> #define CREATE_TRACE_POINTS #include <trace/events/signal.h>
@@@ -2350,6 -2351,7 +2351,7 @@@ relock if (sig_kernel_coredump(signr)) { if (print_fatal_signals) print_fatal_signal(info->si_signo); + proc_coredump_connector(current); /* * If it was able to dump core, this kills all * other threads in the group and synchronizes with @@@ -2948,7 -2950,7 +2950,7 @@@ do_send_specific(pid_t tgid, pid_t pid
static int do_tkill(pid_t tgid, pid_t pid, int sig) { - struct siginfo info; + struct siginfo info = {};
info.si_signo = sig; info.si_errno = 0; diff --combined net/batman-adv/main.c index fa563e4,6277735..790e917 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@@ -35,6 -35,7 +35,7 @@@ #include "vis.h" #include "hash.h" #include "bat_algo.h" + #include "network-coding.h"
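Backing up to kernel/signal.c for a moment: the do_tkill() change from "struct siginfo info;" to "struct siginfo info = {};" closes an infoleak. Only a handful of fields are assigned before the struct is copied out to the signal handler, so with an uninitialized automatic variable the untouched bytes exported stale kernel stack. A userspace model of the difference (fake_siginfo is a stand-in; inspecting the bytes as unsigned char keeps the demo well defined):

	#include <stdio.h>
	#include <string.h>

	struct fake_siginfo {	/* a few fields plus a big union, roughly */
		int si_signo, si_errno, si_code;
		char pad[32];
	};

	union backing {
		unsigned char raw[sizeof(struct fake_siginfo)];
		struct fake_siginfo info;
	};

	static void count_stale(const char *tag, const union backing *b)
	{
		size_t i, stale = 0;

		for (i = 0; i < sizeof(b->raw); i++)
			if (b->raw[i] == 0xAA)
				stale++;
		printf("%s: %zu stale byte(s) would be copied out\n",
		       tag, stale);
	}

	int main(void)
	{
		union backing b;

		memset(b.raw, 0xAA, sizeof(b.raw));	/* "old stack" junk */
		b.info.si_signo = 9;		/* partial init: old code */
		b.info.si_errno = 0;
		count_stale("uninitialized", &b);

		memset(b.raw, 0xAA, sizeof(b.raw));
		b.info = (struct fake_siginfo){0};	/* whole-object clear */
		b.info.si_signo = 9;
		count_stale("zero-initialized", &b);
		return 0;
	}

The empty-brace form "= {}" used in the kernel is a GCC extension with the same whole-object-clearing effect as the {0} initializer above.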
/* List manipulations on hardif_list have to be rtnl_lock()'ed, @@@ -70,6 -71,7 +71,7 @@@ static int __init batadv_init(void batadv_debugfs_init();
register_netdevice_notifier(&batadv_hard_if_notifier); + rtnl_link_register(&batadv_link_ops);
pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n", BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION); @@@ -80,6 -82,7 +82,7 @@@ static void __exit batadv_exit(void) { batadv_debugfs_destroy(); + rtnl_link_unregister(&batadv_link_ops); unregister_netdevice_notifier(&batadv_hard_if_notifier); batadv_hardif_remove_interfaces();
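Registering batadv_link_ops on module load hooks batman-adv into the rtnetlink link framework, so a soft interface can be created with plain "ip link add name bat0 type batadv" instead of only through sysfs; batadv_exit() drops the registration again before the netdevice notifier, mirroring init order in reverse. As a rough sketch, such a link-ops table looks like this (the hook names are assumptions; the real definition lives in soft-interface.c and may carry more hooks):

	#include <net/rtnetlink.h>

	/* sketch only, field names from the standard rtnl_link_ops API */
	static struct rtnl_link_ops batadv_link_ops_sketch __read_mostly = {
		.kind		= "batadv",
		.priv_size	= sizeof(struct batadv_priv),
		.setup		= batadv_softif_init_early,	 /* assumed */
		.dellink	= batadv_softif_destroy_netlink, /* assumed */
	};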
@@@ -135,6 -138,10 +138,10 @@@ int batadv_mesh_init(struct net_device if (ret < 0) goto err;
+ ret = batadv_nc_init(bat_priv); + if (ret < 0) + goto err; + atomic_set(&bat_priv->gw.reselect, 0); atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
@@@ -157,6 -164,7 +164,7 @@@ void batadv_mesh_free(struct net_devic
batadv_gw_node_purge(bat_priv); batadv_originator_free(bat_priv); + batadv_nc_free(bat_priv);
batadv_tt_free(bat_priv);
@@@ -169,7 -177,7 +177,7 @@@ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); }
-int batadv_is_my_mac(const uint8_t *addr) +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) { const struct batadv_hard_iface *hard_iface;
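batadv_is_my_mac() now takes the bat_priv of the mesh a packet arrived on, and the next hunk adds the matching check that skips hard interfaces bound to a different soft interface. The point is isolation: on a host running several batX meshes, an address owned by one mesh must not cause another mesh to treat the packet as its own. Callers in the receive path end up looking roughly like this sketch (the local-delivery helper is hypothetical):

	static int demo_recv(struct sk_buff *skb,
			     struct batadv_hard_iface *recv_if)
	{
		struct batadv_priv *bat_priv =
			netdev_priv(recv_if->soft_iface);
		struct ethhdr *ethhdr = eth_hdr(skb);

		/* matches only addresses of this mesh instance now */
		if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
			return demo_deliver_local(bat_priv, skb); /* hypothetical */

		return NET_RX_DROP;
	}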
@@@ -178,9 -186,6 +186,9 @@@ if (hard_iface->if_status != BATADV_IF_ACTIVE) continue;
+ if (hard_iface->soft_iface != bat_priv->soft_iface) + continue; + if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { rcu_read_unlock(); return 1; @@@ -414,7 -419,7 +422,7 @@@ int batadv_algo_seq_print_text(struct s { struct batadv_algo_ops *bat_algo_ops;
- seq_printf(seq, "Available routing algorithms:\n"); + seq_puts(seq, "Available routing algorithms:\n");
hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) { seq_printf(seq, "%s\n", bat_algo_ops->name); diff --combined net/batman-adv/main.h index d40910d,f90f5bc..59a0d6a --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@@ -26,7 -26,7 +26,7 @@@ #define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION - #define BATADV_SOURCE_VERSION "2013.1.0" + #define BATADV_SOURCE_VERSION "2013.2.0" #endif
/* B.A.T.M.A.N. parameters */ @@@ -105,6 -105,8 +105,8 @@@ #define BATADV_RESET_PROTECTION_MS 30000 #define BATADV_EXPECTED_SEQNO_RANGE 65536
+ #define BATADV_NC_NODE_TIMEOUT 10000 /* Milliseconds */ + enum batadv_mesh_state { BATADV_MESH_INACTIVE, BATADV_MESH_ACTIVE, @@@ -150,6 -152,7 +152,7 @@@ enum batadv_uev_type #include <linux/percpu.h> #include <linux/slab.h> #include <net/sock.h> /* struct sock */ + #include <net/rtnetlink.h> #include <linux/jiffies.h> #include <linux/seq_file.h> #include "types.h" @@@ -162,7 -165,7 +165,7 @@@ extern struct workqueue_struct *batadv_
int batadv_mesh_init(struct net_device *soft_iface); void batadv_mesh_free(struct net_device *soft_iface); -int batadv_is_my_mac(const uint8_t *addr); +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_hard_iface * batadv_seq_print_text_primary_if_get(struct seq_file *seq); int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, @@@ -185,6 -188,7 +188,7 @@@ __be32 batadv_skb_crc32(struct sk_buff * @BATADV_DBG_TT: translation table messages * @BATADV_DBG_BLA: bridge loop avoidance messages * @BATADV_DBG_DAT: ARP snooping and DAT related messages + * @BATADV_DBG_NC: network coding related messages * @BATADV_DBG_ALL: the union of all the above log levels */ enum batadv_dbg_level { @@@ -193,7 -197,8 +197,8 @@@ BATADV_DBG_TT = BIT(2), BATADV_DBG_BLA = BIT(3), BATADV_DBG_DAT = BIT(4), - BATADV_DBG_ALL = 31, + BATADV_DBG_NC = BIT(5), + BATADV_DBG_ALL = 63, };
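BATADV_DBG_NC = BIT(5) widens the log-level mask, so the hand-maintained catch-all has to move from 31 to 63; forgetting that update is the classic failure mode of spelled-out "ALL" constants. A self-contained check of the invariant (BIT() redefined for userspace, names shortened from the enum above):

	#include <assert.h>

	#define BIT(n) (1U << (n))

	enum dbg_level {
		DBG_BATMAN = BIT(0),
		DBG_ROUTES = BIT(1),
		DBG_TT     = BIT(2),
		DBG_BLA    = BIT(3),
		DBG_DAT    = BIT(4),
		DBG_NC     = BIT(5),	/* the newly added bit */
		DBG_ALL    = 63,	/* i.e. BIT(6) - 1 */
	};

	int main(void)
	{
		/* the union of the individual bits must equal the catch-all */
		assert((DBG_BATMAN | DBG_ROUTES | DBG_TT | DBG_BLA |
			DBG_DAT | DBG_NC) == DBG_ALL);
		return 0;
	}

Defining the catch-all as (BIT(6) - 1), or deriving it from the highest bit, would keep it from drifting the next time a level is added.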
#ifdef CONFIG_BATMAN_ADV_DEBUG @@@ -298,4 -303,10 +303,10 @@@ static inline uint64_t batadv_sum_count return sum; }
+ /* Define a macro to reach the control buffer of the skb. The members of the + * control buffer are defined in struct batadv_skb_cb in types.h. + * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h. + */ + #define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0])) + #endif /* _NET_BATMAN_ADV_MAIN_H_ */ diff --combined net/batman-adv/network-coding.c index 0000000,6b9a544..f7c5430 mode 000000,100644..100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@@ -1,0 -1,1821 +1,1822 @@@ + /* Copyright (C) 2012-2013 B.A.T.M.A.N. contributors: + * + * Martin Hundebøll, Jeppe Ledet-Pedersen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + + #include <linux/debugfs.h> + + #include "main.h" + #include "hash.h" + #include "network-coding.h" + #include "send.h" + #include "originator.h" + #include "hard-interface.h" + #include "routing.h" + + static struct lock_class_key batadv_nc_coding_hash_lock_class_key; + static struct lock_class_key batadv_nc_decoding_hash_lock_class_key; + + static void batadv_nc_worker(struct work_struct *work); + static int batadv_nc_recv_coded_packet(struct sk_buff *skb, + struct batadv_hard_iface *recv_if); + + /** + * batadv_nc_start_timer - initialise the nc periodic worker + * @bat_priv: the bat priv with all the soft interface information + */ + static void batadv_nc_start_timer(struct batadv_priv *bat_priv) + { + queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work, + msecs_to_jiffies(10)); + } + + /** + * batadv_nc_init - initialise coding hash table and start house keeping + * @bat_priv: the bat priv with all the soft interface information + */ + int batadv_nc_init(struct batadv_priv *bat_priv) + { + bat_priv->nc.timestamp_fwd_flush = jiffies; + bat_priv->nc.timestamp_sniffed_purge = jiffies; + + if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash) + return 0; + + bat_priv->nc.coding_hash = batadv_hash_new(128); + if (!bat_priv->nc.coding_hash) + goto err; + + batadv_hash_set_lock_class(bat_priv->nc.coding_hash, + &batadv_nc_coding_hash_lock_class_key); + + bat_priv->nc.decoding_hash = batadv_hash_new(128); + if (!bat_priv->nc.decoding_hash) + goto err; + + batadv_hash_set_lock_class(bat_priv->nc.coding_hash, + &batadv_nc_decoding_hash_lock_class_key); + + /* Register our packet type */ + if (batadv_recv_handler_register(BATADV_CODED, + batadv_nc_recv_coded_packet) < 0) + goto err; + + INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); + batadv_nc_start_timer(bat_priv); + + return 0; + + err: + return -ENOMEM; + } + + /** + * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables + * @bat_priv: the bat priv with all the soft interface information + */ + void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv) + { + atomic_set(&bat_priv->network_coding, 1); + bat_priv->nc.min_tq = 200; + bat_priv->nc.max_fwd_delay = 10; + bat_priv->nc.max_buffer_time 
= 200; + } + + /** + * batadv_nc_init_orig - initialise the nc fields of an orig_node + * @orig_node: the orig_node which is going to be initialised + */ + void batadv_nc_init_orig(struct batadv_orig_node *orig_node) + { + INIT_LIST_HEAD(&orig_node->in_coding_list); + INIT_LIST_HEAD(&orig_node->out_coding_list); + spin_lock_init(&orig_node->in_coding_list_lock); + spin_lock_init(&orig_node->out_coding_list_lock); + } + + /** + * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove + * its refcount on the orig_node + * @rcu: rcu pointer of the nc node + */ + static void batadv_nc_node_free_rcu(struct rcu_head *rcu) + { + struct batadv_nc_node *nc_node; + + nc_node = container_of(rcu, struct batadv_nc_node, rcu); + batadv_orig_node_free_ref(nc_node->orig_node); + kfree(nc_node); + } + + /** + * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly + * frees it + * @nc_node: the nc node to free + */ + static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node) + { + if (atomic_dec_and_test(&nc_node->refcount)) + call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu); + } + + /** + * batadv_nc_path_free_ref - decrements the nc path refcounter and possibly + * frees it + * @nc_path: the nc node to free + */ + static void batadv_nc_path_free_ref(struct batadv_nc_path *nc_path) + { + if (atomic_dec_and_test(&nc_path->refcount)) + kfree_rcu(nc_path, rcu); + } + + /** + * batadv_nc_packet_free - frees nc packet + * @nc_packet: the nc packet to free + */ + static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet) + { + if (nc_packet->skb) + kfree_skb(nc_packet->skb); + + batadv_nc_path_free_ref(nc_packet->nc_path); + kfree(nc_packet); + } + + /** + * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged + * @bat_priv: the bat priv with all the soft interface information + * @nc_node: the nc node to check + * + * Returns true if the entry has to be purged now, false otherwise + */ + static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv, + struct batadv_nc_node *nc_node) + { + if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) + return true; + + return batadv_has_timed_out(nc_node->last_seen, BATADV_NC_NODE_TIMEOUT); + } + + /** + * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out + * @bat_priv: the bat priv with all the soft interface information + * @nc_path: the nc path to check + * + * Returns true if the entry has to be purged now, false otherwise + */ + static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv, + struct batadv_nc_path *nc_path) + { + if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) + return true; + + /* purge the path when no packets has been added for 10 times the + * max_fwd_delay time + */ + return batadv_has_timed_out(nc_path->last_valid, + bat_priv->nc.max_fwd_delay * 10); + } + + /** + * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out + * @bat_priv: the bat priv with all the soft interface information + * @nc_path: the nc path to check + * + * Returns true if the entry has to be purged now, false otherwise + */ + static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv, + struct batadv_nc_path *nc_path) + { + if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) + return true; + + /* purge the path when no packets has been added for 10 times the + * max_buffer time + */ + return batadv_has_timed_out(nc_path->last_valid, + bat_priv->nc.max_buffer_time*10); + } + + /** + 
* batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale + * entries + * @bat_priv: the bat priv with all the soft interface information + * @list: list of nc nodes + * @lock: nc node list lock + * @to_purge: function in charge to decide whether an entry has to be purged or + * not. This function takes the nc node as argument and has to return + * a boolean value: true if the entry has to be deleted, false + * otherwise + */ + static void + batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv, + struct list_head *list, + spinlock_t *lock, + bool (*to_purge)(struct batadv_priv *, + struct batadv_nc_node *)) + { + struct batadv_nc_node *nc_node, *nc_node_tmp; + + /* For each nc_node in list */ + spin_lock_bh(lock); + list_for_each_entry_safe(nc_node, nc_node_tmp, list, list) { + /* if an helper function has been passed as parameter, + * ask it if the entry has to be purged or not + */ + if (to_purge && !to_purge(bat_priv, nc_node)) + continue; + + batadv_dbg(BATADV_DBG_NC, bat_priv, + "Removing nc_node %pM -> %pM\n", + nc_node->addr, nc_node->orig_node->orig); + list_del_rcu(&nc_node->list); + batadv_nc_node_free_ref(nc_node); + } + spin_unlock_bh(lock); + } + + /** + * batadv_nc_purge_orig - purges all nc node data attached of the given + * originator + * @bat_priv: the bat priv with all the soft interface information + * @orig_node: orig_node with the nc node entries to be purged + * @to_purge: function in charge to decide whether an entry has to be purged or + * not. This function takes the nc node as argument and has to return + * a boolean value: true is the entry has to be deleted, false + * otherwise + */ + void batadv_nc_purge_orig(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node, + bool (*to_purge)(struct batadv_priv *, + struct batadv_nc_node *)) + { + /* Check ingoing nc_node's of this orig_node */ + batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->in_coding_list, + &orig_node->in_coding_list_lock, + to_purge); + + /* Check outgoing nc_node's of this orig_node */ + batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->out_coding_list, + &orig_node->out_coding_list_lock, + to_purge); + } + + /** + * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they + * have timed out nc nodes + * @bat_priv: the bat priv with all the soft interface information + */ + static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv) + { + struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *head; + struct batadv_orig_node *orig_node; + uint32_t i; + + if (!hash) + return; + + /* For each orig_node */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) + batadv_nc_purge_orig(bat_priv, orig_node, + batadv_nc_to_purge_nc_node); + rcu_read_unlock(); + } + } + + /** + * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove + * unused ones + * @bat_priv: the bat priv with all the soft interface information + * @hash: hash table containing the nc paths to check + * @to_purge: function in charge to decide whether an entry has to be purged or + * not. 
This function takes the nc node as argument and has to return + * a boolean value: true is the entry has to be deleted, false + * otherwise + */ + static void batadv_nc_purge_paths(struct batadv_priv *bat_priv, + struct batadv_hashtable *hash, + bool (*to_purge)(struct batadv_priv *, + struct batadv_nc_path *)) + { + struct hlist_head *head; + struct hlist_node *node_tmp; + struct batadv_nc_path *nc_path; + spinlock_t *lock; /* Protects lists in hash */ + uint32_t i; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + lock = &hash->list_locks[i]; + + /* For each nc_path in this bin */ + spin_lock_bh(lock); + hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) { + /* if an helper function has been passed as parameter, + * ask it if the entry has to be purged or not + */ + if (to_purge && !to_purge(bat_priv, nc_path)) + continue; + + /* purging an non-empty nc_path should never happen, but + * is observed under high CPU load. Delay the purging + * until next iteration to allow the packet_list to be + * emptied first. + */ + if (!unlikely(list_empty(&nc_path->packet_list))) { + net_ratelimited_function(printk, + KERN_WARNING + "Skipping free of non-empty nc_path (%pM -> %pM)!\n", + nc_path->prev_hop, + nc_path->next_hop); + continue; + } + + /* nc_path is unused, so remove it */ + batadv_dbg(BATADV_DBG_NC, bat_priv, + "Remove nc_path %pM -> %pM\n", + nc_path->prev_hop, nc_path->next_hop); + hlist_del_rcu(&nc_path->hash_entry); + batadv_nc_path_free_ref(nc_path); + } + spin_unlock_bh(lock); + } + } + + /** + * batadv_nc_hash_key_gen - computes the nc_path hash key + * @key: buffer to hold the final hash key + * @src: source ethernet mac address going into the hash key + * @dst: destination ethernet mac address going into the hash key + */ + static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src, + const char *dst) + { + memcpy(key->prev_hop, src, sizeof(key->prev_hop)); + memcpy(key->next_hop, dst, sizeof(key->next_hop)); + } + + /** + * batadv_nc_hash_choose - compute the hash value for an nc path + * @data: data to hash + * @size: size of the hash table + * + * Returns the selected index in the hash table for the given data. 
+ */ + static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size) + { + const struct batadv_nc_path *nc_path = data; + uint32_t hash = 0; + + hash = batadv_hash_bytes(hash, &nc_path->prev_hop, + sizeof(nc_path->prev_hop)); + hash = batadv_hash_bytes(hash, &nc_path->next_hop, + sizeof(nc_path->next_hop)); + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; + } + + /** + * batadv_nc_hash_compare - comparing function used in the network coding hash + * tables + * @node: node in the local table + * @data2: second object to compare the node to + * + * Returns 1 if the two entry are the same, 0 otherwise + */ + static int batadv_nc_hash_compare(const struct hlist_node *node, + const void *data2) + { + const struct batadv_nc_path *nc_path1, *nc_path2; + + nc_path1 = container_of(node, struct batadv_nc_path, hash_entry); + nc_path2 = data2; + + /* Return 1 if the two keys are identical */ + if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop, + sizeof(nc_path1->prev_hop)) != 0) + return 0; + + if (memcmp(nc_path1->next_hop, nc_path2->next_hop, + sizeof(nc_path1->next_hop)) != 0) + return 0; + + return 1; + } + + /** + * batadv_nc_hash_find - search for an existing nc path and return it + * @hash: hash table containing the nc path + * @data: search key + * + * Returns the nc_path if found, NULL otherwise. + */ + static struct batadv_nc_path * + batadv_nc_hash_find(struct batadv_hashtable *hash, + void *data) + { + struct hlist_head *head; + struct batadv_nc_path *nc_path, *nc_path_tmp = NULL; + int index; + + if (!hash) + return NULL; + + index = batadv_nc_hash_choose(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(nc_path, head, hash_entry) { + if (!batadv_nc_hash_compare(&nc_path->hash_entry, data)) + continue; + + if (!atomic_inc_not_zero(&nc_path->refcount)) + continue; + + nc_path_tmp = nc_path; + break; + } + rcu_read_unlock(); + + return nc_path_tmp; + } + + /** + * batadv_nc_send_packet - send non-coded packet and free nc_packet struct + * @nc_packet: the nc packet to send + */ + static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet) + { + batadv_send_skb_packet(nc_packet->skb, + nc_packet->neigh_node->if_incoming, + nc_packet->nc_path->next_hop); + nc_packet->skb = NULL; + batadv_nc_packet_free(nc_packet); + } + + /** + * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet. + * @bat_priv: the bat priv with all the soft interface information + * @nc_path: the nc path the packet belongs to + * @nc_packet: the nc packet to be checked + * + * Checks whether the given sniffed (overheard) nc_packet has hit its buffering + * timeout. If so, the packet is no longer kept and the entry deleted from the + * queue. Has to be called with the appropriate locks. + * + * Returns false as soon as the entry in the fifo queue has not been timed out + * yet and true otherwise. 
+ */ + static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv, + struct batadv_nc_path *nc_path, + struct batadv_nc_packet *nc_packet) + { + unsigned long timeout = bat_priv->nc.max_buffer_time; + bool res = false; + + /* Packets are added to tail, so the remaining packets did not time + * out and we can stop processing the current queue + */ + if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE && + !batadv_has_timed_out(nc_packet->timestamp, timeout)) + goto out; + + /* purge nc packet */ + list_del(&nc_packet->list); + batadv_nc_packet_free(nc_packet); + + res = true; + + out: + return res; + } + + /** + * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet. + * @bat_priv: the bat priv with all the soft interface information + * @nc_path: the nc path the packet belongs to + * @nc_packet: the nc packet to be checked + * + * Checks whether the given nc packet has hit its forward timeout. If so, the + * packet is no longer delayed, immediately sent and the entry deleted from the + * queue. Has to be called with the appropriate locks. + * + * Returns false as soon as the entry in the fifo queue has not been timed out + * yet and true otherwise. + */ + static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv, + struct batadv_nc_path *nc_path, + struct batadv_nc_packet *nc_packet) + { + unsigned long timeout = bat_priv->nc.max_fwd_delay; + + /* Packets are added to tail, so the remaining packets did not time + * out and we can stop processing the current queue + */ + if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE && + !batadv_has_timed_out(nc_packet->timestamp, timeout)) + return false; + + /* Send packet */ + batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); + batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, + nc_packet->skb->len + ETH_HLEN); + list_del(&nc_packet->list); + batadv_nc_send_packet(nc_packet); + + return true; + } + + /** + * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out + * nc packets + * @bat_priv: the bat priv with all the soft interface information + * @hash: to be processed hash table + * @process_fn: Function called to process given nc packet. Should return true + * to encourage this function to proceed with the next packet. + * Otherwise the rest of the current queue is skipped. 
+ */ + static void + batadv_nc_process_nc_paths(struct batadv_priv *bat_priv, + struct batadv_hashtable *hash, + bool (*process_fn)(struct batadv_priv *, + struct batadv_nc_path *, + struct batadv_nc_packet *)) + { + struct hlist_head *head; + struct batadv_nc_packet *nc_packet, *nc_packet_tmp; + struct batadv_nc_path *nc_path; + bool ret; + int i; + + if (!hash) + return; + + /* Loop hash table bins */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + /* Loop coding paths */ + rcu_read_lock(); + hlist_for_each_entry_rcu(nc_path, head, hash_entry) { + /* Loop packets */ + spin_lock_bh(&nc_path->packet_list_lock); + list_for_each_entry_safe(nc_packet, nc_packet_tmp, + &nc_path->packet_list, list) { + ret = process_fn(bat_priv, nc_path, nc_packet); + if (!ret) + break; + } + spin_unlock_bh(&nc_path->packet_list_lock); + } + rcu_read_unlock(); + } + } + + /** + * batadv_nc_worker - periodic task for house keeping related to network coding + * @work: kernel work struct + */ + static void batadv_nc_worker(struct work_struct *work) + { + struct delayed_work *delayed_work; + struct batadv_priv_nc *priv_nc; + struct batadv_priv *bat_priv; + unsigned long timeout; + + delayed_work = container_of(work, struct delayed_work, work); + priv_nc = container_of(delayed_work, struct batadv_priv_nc, work); + bat_priv = container_of(priv_nc, struct batadv_priv, nc); + + batadv_nc_purge_orig_hash(bat_priv); + batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, + batadv_nc_to_purge_nc_path_coding); + batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, + batadv_nc_to_purge_nc_path_decoding); + + timeout = bat_priv->nc.max_fwd_delay; + + if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) { + batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash, + batadv_nc_fwd_flush); + bat_priv->nc.timestamp_fwd_flush = jiffies; + } + + if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge, + bat_priv->nc.max_buffer_time)) { + batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash, + batadv_nc_sniffed_purge); + bat_priv->nc.timestamp_sniffed_purge = jiffies; + } + + /* Schedule a new check */ + batadv_nc_start_timer(bat_priv); + } + + /** + * batadv_can_nc_with_orig - checks whether the given orig node is suitable for + * coding or not + * @bat_priv: the bat priv with all the soft interface information + * @orig_node: neighboring orig node which may be used as nc candidate + * @ogm_packet: incoming ogm packet also used for the checks + * + * Returns true if: + * 1) The OGM must have the most recent sequence number. + * 2) The TTL must be decremented by one and only one. + * 3) The OGM must be received from the first hop from orig_node. + * 4) The TQ value of the OGM must be above bat_priv->nc.min_tq. 
+ */ + static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node, + struct batadv_ogm_packet *ogm_packet) + { + if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno)) + return false; + if (orig_node->last_ttl != ogm_packet->header.ttl + 1) + return false; + if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender)) + return false; + if (ogm_packet->tq < bat_priv->nc.min_tq) + return false; + + return true; + } + + /** + * batadv_nc_find_nc_node - search for an existing nc node and return it + * @orig_node: orig node originating the ogm packet + * @orig_neigh_node: neighboring orig node from which we received the ogm packet + * (can be equal to orig_node) + * @in_coding: traverse incoming or outgoing network coding list + * + * Returns the nc_node if found, NULL otherwise. + */ + static struct batadv_nc_node + *batadv_nc_find_nc_node(struct batadv_orig_node *orig_node, + struct batadv_orig_node *orig_neigh_node, + bool in_coding) + { + struct batadv_nc_node *nc_node, *nc_node_out = NULL; + struct list_head *list; + + if (in_coding) + list = &orig_neigh_node->in_coding_list; + else + list = &orig_neigh_node->out_coding_list; + + /* Traverse list of nc_nodes to orig_node */ + rcu_read_lock(); + list_for_each_entry_rcu(nc_node, list, list) { + if (!batadv_compare_eth(nc_node->addr, orig_node->orig)) + continue; + + if (!atomic_inc_not_zero(&nc_node->refcount)) + continue; + + /* Found a match */ + nc_node_out = nc_node; + break; + } + rcu_read_unlock(); + + return nc_node_out; + } + + /** + * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was + * not found + * @bat_priv: the bat priv with all the soft interface information + * @orig_node: orig node originating the ogm packet + * @orig_neigh_node: neighboring orig node from which we received the ogm packet + * (can be equal to orig_node) + * @in_coding: traverse incoming or outgoing network coding list + * + * Returns the nc_node if found or created, NULL in case of an error. 
+ */ + static struct batadv_nc_node + *batadv_nc_get_nc_node(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node, + struct batadv_orig_node *orig_neigh_node, + bool in_coding) + { + struct batadv_nc_node *nc_node; + spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ + struct list_head *list; + + /* Check if nc_node is already added */ + nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); + + /* Node found */ + if (nc_node) + return nc_node; + + nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); + if (!nc_node) + return NULL; + + if (!atomic_inc_not_zero(&orig_neigh_node->refcount)) + goto free; + + /* Initialize nc_node */ + INIT_LIST_HEAD(&nc_node->list); + memcpy(nc_node->addr, orig_node->orig, ETH_ALEN); + nc_node->orig_node = orig_neigh_node; + atomic_set(&nc_node->refcount, 2); + + /* Select ingoing or outgoing coding node */ + if (in_coding) { + lock = &orig_neigh_node->in_coding_list_lock; + list = &orig_neigh_node->in_coding_list; + } else { + lock = &orig_neigh_node->out_coding_list_lock; + list = &orig_neigh_node->out_coding_list; + } + + batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", + nc_node->addr, nc_node->orig_node->orig); + + /* Add nc_node to orig_node */ + spin_lock_bh(lock); + list_add_tail_rcu(&nc_node->list, list); + spin_unlock_bh(lock); + + return nc_node; + + free: + kfree(nc_node); + return NULL; + } + + /** + * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node structs + * (best called on incoming OGMs) + * @bat_priv: the bat priv with all the soft interface information + * @orig_node: orig node originating the ogm packet + * @orig_neigh_node: neighboring orig node from which we received the ogm packet + * (can be equal to orig_node) + * @ogm_packet: incoming ogm packet + * @is_single_hop_neigh: orig_node is a single hop neighbor + */ + void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, + struct batadv_orig_node *orig_node, + struct batadv_orig_node *orig_neigh_node, + struct batadv_ogm_packet *ogm_packet, + int is_single_hop_neigh) + { + struct batadv_nc_node *in_nc_node = NULL, *out_nc_node = NULL; + + /* Check if network coding is enabled */ + if (!atomic_read(&bat_priv->network_coding)) + goto out; + + /* accept ogms from 'good' neighbors and single hop neighbors */ + if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) && + !is_single_hop_neigh) + goto out; + + /* Add orig_node as in_nc_node on hop */ + in_nc_node = batadv_nc_get_nc_node(bat_priv, orig_node, + orig_neigh_node, true); + if (!in_nc_node) + goto out; + + in_nc_node->last_seen = jiffies; + + /* Add hop as out_nc_node on orig_node */ + out_nc_node = batadv_nc_get_nc_node(bat_priv, orig_neigh_node, + orig_node, false); + if (!out_nc_node) + goto out; + + out_nc_node->last_seen = jiffies; + + out: + if (in_nc_node) + batadv_nc_node_free_ref(in_nc_node); + if (out_nc_node) + batadv_nc_node_free_ref(out_nc_node); + } + + /** + * batadv_nc_get_path - get existing nc_path or allocate a new one + * @bat_priv: the bat priv with all the soft interface information + * @hash: hash table containing the nc path + * @src: ethernet source address - first half of the nc path search key + * @dst: ethernet destination address - second half of the nc path search key + * + * Returns pointer to nc_path if the path was found or created, returns NULL + * on error. 
+ */ + static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, + struct batadv_hashtable *hash, + uint8_t *src, + uint8_t *dst) + { + int hash_added; + struct batadv_nc_path *nc_path, nc_path_key; + + batadv_nc_hash_key_gen(&nc_path_key, src, dst); + + /* Search for existing nc_path */ + nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key); + + if (nc_path) { + /* Set timestamp to delay removal of nc_path */ + nc_path->last_valid = jiffies; + return nc_path; + } + + /* No existing nc_path was found; create a new */ + nc_path = kzalloc(sizeof(*nc_path), GFP_ATOMIC); + + if (!nc_path) + return NULL; + + /* Initialize nc_path */ + INIT_LIST_HEAD(&nc_path->packet_list); + spin_lock_init(&nc_path->packet_list_lock); + atomic_set(&nc_path->refcount, 2); + nc_path->last_valid = jiffies; + memcpy(nc_path->next_hop, dst, ETH_ALEN); + memcpy(nc_path->prev_hop, src, ETH_ALEN); + + batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n", + nc_path->prev_hop, + nc_path->next_hop); + + /* Add nc_path to hash table */ + hash_added = batadv_hash_add(hash, batadv_nc_hash_compare, + batadv_nc_hash_choose, &nc_path_key, + &nc_path->hash_entry); + + if (hash_added < 0) { + kfree(nc_path); + return NULL; + } + + return nc_path; + } + + /** + * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair + * selection of a receiver with slightly lower TQ than the other + * @tq: to be weighted tq value + */ + static uint8_t batadv_nc_random_weight_tq(uint8_t tq) + { + uint8_t rand_val, rand_tq; + + get_random_bytes(&rand_val, sizeof(rand_val)); + + /* randomize the estimated packet loss (max TQ - estimated TQ) */ + rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); + + /* normalize the randomized packet loss */ + rand_tq /= BATADV_TQ_MAX_VALUE; + + /* convert to (randomized) estimated tq again */ + return BATADV_TQ_MAX_VALUE - rand_tq; + } + + /** + * batadv_nc_memxor - XOR destination with source + * @dst: byte array to XOR into + * @src: byte array to XOR from + * @len: length of destination array + */ + static void batadv_nc_memxor(char *dst, const char *src, unsigned int len) + { + unsigned int i; + + for (i = 0; i < len; ++i) + dst[i] ^= src[i]; + } + + /** + * batadv_nc_code_packets - code a received unicast_packet with an nc packet + * into a coded_packet and send it + * @bat_priv: the bat priv with all the soft interface information + * @skb: data skb to forward + * @ethhdr: pointer to the ethernet header inside the skb + * @nc_packet: structure containing the packet to the skb can be coded with + * @neigh_node: next hop to forward packet to + * + * Returns true if both packets are consumed, false otherwise. 
+ */ + static bool batadv_nc_code_packets(struct batadv_priv *bat_priv, + struct sk_buff *skb, + struct ethhdr *ethhdr, + struct batadv_nc_packet *nc_packet, + struct batadv_neigh_node *neigh_node) + { + uint8_t tq_weighted_neigh, tq_weighted_coding; + struct sk_buff *skb_dest, *skb_src; + struct batadv_unicast_packet *packet1; + struct batadv_unicast_packet *packet2; + struct batadv_coded_packet *coded_packet; + struct batadv_neigh_node *neigh_tmp, *router_neigh; + struct batadv_neigh_node *router_coding = NULL; + uint8_t *first_source, *first_dest, *second_source, *second_dest; + __be32 packet_id1, packet_id2; + size_t count; + bool res = false; + int coding_len; + int unicast_size = sizeof(*packet1); + int coded_size = sizeof(*coded_packet); + int header_add = coded_size - unicast_size; + + router_neigh = batadv_orig_node_get_router(neigh_node->orig_node); + if (!router_neigh) + goto out; + + neigh_tmp = nc_packet->neigh_node; + router_coding = batadv_orig_node_get_router(neigh_tmp->orig_node); + if (!router_coding) + goto out; + + tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg); + tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg); + + /* Select one destination for the MAC-header dst-field based on + * weighted TQ-values. + */ + if (tq_weighted_neigh >= tq_weighted_coding) { + /* Destination from nc_packet is selected for MAC-header */ + first_dest = nc_packet->nc_path->next_hop; + first_source = nc_packet->nc_path->prev_hop; + second_dest = neigh_node->addr; + second_source = ethhdr->h_source; + packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data; + packet2 = (struct batadv_unicast_packet *)skb->data; + packet_id1 = nc_packet->packet_id; + packet_id2 = batadv_skb_crc32(skb, + skb->data + sizeof(*packet2)); + } else { + /* Destination for skb is selected for MAC-header */ + first_dest = neigh_node->addr; + first_source = ethhdr->h_source; + second_dest = nc_packet->nc_path->next_hop; + second_source = nc_packet->nc_path->prev_hop; + packet1 = (struct batadv_unicast_packet *)skb->data; + packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data; + packet_id1 = batadv_skb_crc32(skb, + skb->data + sizeof(*packet1)); + packet_id2 = nc_packet->packet_id; + } + + /* Instead of zero padding the smallest data buffer, we + * code into the largest. 
+ */ + if (skb->len <= nc_packet->skb->len) { + skb_dest = nc_packet->skb; + skb_src = skb; + } else { + skb_dest = skb; + skb_src = nc_packet->skb; + } + + /* coding_len is used when decoding the packet shorter packet */ + coding_len = skb_src->len - unicast_size; + + if (skb_linearize(skb_dest) < 0 || skb_linearize(skb_src) < 0) + goto out; + + skb_push(skb_dest, header_add); + + coded_packet = (struct batadv_coded_packet *)skb_dest->data; + skb_reset_mac_header(skb_dest); + + coded_packet->header.packet_type = BATADV_CODED; + coded_packet->header.version = BATADV_COMPAT_VERSION; + coded_packet->header.ttl = packet1->header.ttl; + + /* Info about first unicast packet */ + memcpy(coded_packet->first_source, first_source, ETH_ALEN); + memcpy(coded_packet->first_orig_dest, packet1->dest, ETH_ALEN); + coded_packet->first_crc = packet_id1; + coded_packet->first_ttvn = packet1->ttvn; + + /* Info about second unicast packet */ + memcpy(coded_packet->second_dest, second_dest, ETH_ALEN); + memcpy(coded_packet->second_source, second_source, ETH_ALEN); + memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN); + coded_packet->second_crc = packet_id2; + coded_packet->second_ttl = packet2->header.ttl; + coded_packet->second_ttvn = packet2->ttvn; + coded_packet->coded_len = htons(coding_len); + + /* This is where the magic happens: Code skb_src into skb_dest */ + batadv_nc_memxor(skb_dest->data + coded_size, + skb_src->data + unicast_size, coding_len); + + /* Update counters accordingly */ + if (BATADV_SKB_CB(skb_src)->decoded && + BATADV_SKB_CB(skb_dest)->decoded) { + /* Both packets are recoded */ + count = skb_src->len + ETH_HLEN; + count += skb_dest->len + ETH_HLEN; + batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE, 2); + batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, count); + } else if (!BATADV_SKB_CB(skb_src)->decoded && + !BATADV_SKB_CB(skb_dest)->decoded) { + /* Both packets are newly coded */ + count = skb_src->len + ETH_HLEN; + count += skb_dest->len + ETH_HLEN; + batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE, 2); + batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, count); + } else if (BATADV_SKB_CB(skb_src)->decoded && + !BATADV_SKB_CB(skb_dest)->decoded) { + /* skb_src recoded and skb_dest is newly coded */ + batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE); + batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, + skb_src->len + ETH_HLEN); + batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE); + batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, + skb_dest->len + ETH_HLEN); + } else if (!BATADV_SKB_CB(skb_src)->decoded && + BATADV_SKB_CB(skb_dest)->decoded) { + /* skb_src is newly coded and skb_dest is recoded */ + batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE); + batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, + skb_src->len + ETH_HLEN); + batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE); + batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, + skb_dest->len + ETH_HLEN); + } + + /* skb_src is now coded into skb_dest, so free it */ + kfree_skb(skb_src); + + /* avoid duplicate free of skb from nc_packet */ + nc_packet->skb = NULL; + batadv_nc_packet_free(nc_packet); + + /* Send the coded packet and return true */ + batadv_send_skb_packet(skb_dest, neigh_node->if_incoming, first_dest); + res = true; + out: + if (router_neigh) + batadv_neigh_node_free_ref(router_neigh); + if (router_coding) + batadv_neigh_node_free_ref(router_coding); + return res; + } + + /** + * batadv_nc_skb_coding_possible - true if a decoded skb is available at dst. 
+ * @skb: data skb to forward + * @dst: destination mac address of the other skb to code with + * @src: source mac address of skb + * + * Whenever we network code a packet we have to check whether we received it in + * a network coded form. If so, we may not be able to use it for coding because + * some neighbors may also have received (overheard) the packet in the network + * coded form without being able to decode it. It is hard to know which of the + * neighboring nodes was able to decode the packet, therefore we can only + * re-code the packet if the source of the previous encoded packet is involved. + * Since the source encoded the packet we can be certain it has all necessary + * decode information. + * + * Returns true if coding of a decoded packet is allowed. + */ + static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, + uint8_t *dst, uint8_t *src) + { + if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src)) + return false; + else + return true; + } + + /** + * batadv_nc_path_search - Find the coding path matching in_nc_node and + * out_nc_node to retrieve a buffered packet that can be used for coding. + * @bat_priv: the bat priv with all the soft interface information + * @in_nc_node: pointer to skb next hop's neighbor nc node + * @out_nc_node: pointer to skb source's neighbor nc node + * @skb: data skb to forward + * @eth_dst: next hop mac address of skb + * + * Returns true if coding of a decoded skb is allowed. + */ + static struct batadv_nc_packet * + batadv_nc_path_search(struct batadv_priv *bat_priv, + struct batadv_nc_node *in_nc_node, + struct batadv_nc_node *out_nc_node, + struct sk_buff *skb, + uint8_t *eth_dst) + { + struct batadv_nc_path *nc_path, nc_path_key; + struct batadv_nc_packet *nc_packet_out = NULL; + struct batadv_nc_packet *nc_packet, *nc_packet_tmp; + struct batadv_hashtable *hash = bat_priv->nc.coding_hash; + int idx; + + if (!hash) + return NULL; + + /* Create almost path key */ + batadv_nc_hash_key_gen(&nc_path_key, in_nc_node->addr, + out_nc_node->addr); + idx = batadv_nc_hash_choose(&nc_path_key, hash->size); + + /* Check for coding opportunities in this nc_path */ + rcu_read_lock(); + hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) { + if (!batadv_compare_eth(nc_path->prev_hop, in_nc_node->addr)) + continue; + + if (!batadv_compare_eth(nc_path->next_hop, out_nc_node->addr)) + continue; + + spin_lock_bh(&nc_path->packet_list_lock); + if (list_empty(&nc_path->packet_list)) { + spin_unlock_bh(&nc_path->packet_list_lock); + continue; + } + + list_for_each_entry_safe(nc_packet, nc_packet_tmp, + &nc_path->packet_list, list) { + if (!batadv_nc_skb_coding_possible(nc_packet->skb, + eth_dst, + in_nc_node->addr)) + continue; + + /* Coding opportunity is found! */ + list_del(&nc_packet->list); + nc_packet_out = nc_packet; + break; + } + + spin_unlock_bh(&nc_path->packet_list_lock); + break; + } + rcu_read_unlock(); + + return nc_packet_out; + } + + /** + * batadv_nc_skb_src_search - Loops through the list of neighoring nodes of the + * skb's sender (may be equal to the originator). + * @bat_priv: the bat priv with all the soft interface information + * @skb: data skb to forward + * @eth_dst: next hop mac address of skb + * @eth_src: source mac address of skb + * @in_nc_node: pointer to skb next hop's neighbor nc node + * + * Returns an nc packet if a suitable coding packet was found, NULL otherwise. 
+ */ + static struct batadv_nc_packet * + batadv_nc_skb_src_search(struct batadv_priv *bat_priv, + struct sk_buff *skb, + uint8_t *eth_dst, + uint8_t *eth_src, + struct batadv_nc_node *in_nc_node) + { + struct batadv_orig_node *orig_node; + struct batadv_nc_node *out_nc_node; + struct batadv_nc_packet *nc_packet = NULL; + + orig_node = batadv_orig_hash_find(bat_priv, eth_src); + if (!orig_node) + return NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(out_nc_node, + &orig_node->out_coding_list, list) { + /* Check if the skb is decoded and if recoding is possible */ + if (!batadv_nc_skb_coding_possible(skb, + out_nc_node->addr, eth_src)) + continue; + + /* Search for an opportunity in this nc_path */ + nc_packet = batadv_nc_path_search(bat_priv, in_nc_node, + out_nc_node, skb, eth_dst); + if (nc_packet) + break; + } + rcu_read_unlock(); + + batadv_orig_node_free_ref(orig_node); + return nc_packet; + } + + /** + * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the + * unicast skb before it is stored for use in later decoding + * @bat_priv: the bat priv with all the soft interface information + * @skb: data skb to store + * @eth_dst_new: new destination mac address of skb + */ + static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv, + struct sk_buff *skb, + uint8_t *eth_dst_new) + { + struct ethhdr *ethhdr; + + /* Copy skb header to change the mac header */ + skb = pskb_copy(skb, GFP_ATOMIC); + if (!skb) + return; + + /* Set the mac header as if we actually sent the packet uncoded */ + ethhdr = (struct ethhdr *)skb_mac_header(skb); + memcpy(ethhdr->h_source, ethhdr->h_dest, ETH_ALEN); + memcpy(ethhdr->h_dest, eth_dst_new, ETH_ALEN); + + /* Set data pointer to MAC header to mimic packets from our tx path */ + skb_push(skb, ETH_HLEN); + + /* Add the packet to the decoding packet pool */ + batadv_nc_skb_store_for_decoding(bat_priv, skb); + + /* batadv_nc_skb_store_for_decoding() clones the skb, so we must free + * our ref + */ + kfree_skb(skb); + } + + /** + * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst. + * @skb: data skb to forward + * @neigh_node: next hop to forward packet to + * @ethhdr: pointer to the ethernet header inside the skb + * + * Loops through list of neighboring nodes the next hop has a good connection to + * (receives OGMs with a sufficient quality). We need to find a neighbor of our + * next hop that potentially sent a packet which our next hop also received + * (overheard) and has stored for later decoding. 
+ * + * Returns true if the skb was consumed (encoded packet sent) or false otherwise + */ + static bool batadv_nc_skb_dst_search(struct sk_buff *skb, + struct batadv_neigh_node *neigh_node, + struct ethhdr *ethhdr) + { + struct net_device *netdev = neigh_node->if_incoming->soft_iface; + struct batadv_priv *bat_priv = netdev_priv(netdev); + struct batadv_orig_node *orig_node = neigh_node->orig_node; + struct batadv_nc_node *nc_node; + struct batadv_nc_packet *nc_packet = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(nc_node, &orig_node->in_coding_list, list) { + /* Search for coding opportunity with this in_nc_node */ + nc_packet = batadv_nc_skb_src_search(bat_priv, skb, + neigh_node->addr, + ethhdr->h_source, nc_node); + + /* Opportunity was found, so stop searching */ + if (nc_packet) + break; + } + rcu_read_unlock(); + + if (!nc_packet) + return false; + + /* Save packets for later decoding */ + batadv_nc_skb_store_before_coding(bat_priv, skb, + neigh_node->addr); + batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb, + nc_packet->neigh_node->addr); + + /* Code and send packets */ + if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet, + neigh_node)) + return true; + + /* out of mem? Coding failed - we have to free the buffered packet + * to avoid memory leaks. The skb passed as argument will be dealt with + * by the calling function. + */ + batadv_nc_send_packet(nc_packet); + return false; + } + + /** + * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding + * @skb: skb to add to path + * @nc_path: path to add skb to + * @neigh_node: next hop to forward packet to + * @packet_id: checksum to identify packet + * + * Returns true if the packet was buffered or false in case of an error. + */ + static bool batadv_nc_skb_add_to_path(struct sk_buff *skb, + struct batadv_nc_path *nc_path, + struct batadv_neigh_node *neigh_node, + __be32 packet_id) + { + struct batadv_nc_packet *nc_packet; + + nc_packet = kzalloc(sizeof(*nc_packet), GFP_ATOMIC); + if (!nc_packet) + return false; + + /* Initialize nc_packet */ + nc_packet->timestamp = jiffies; + nc_packet->packet_id = packet_id; + nc_packet->skb = skb; + nc_packet->neigh_node = neigh_node; + nc_packet->nc_path = nc_path; + + /* Add coding packet to list */ + spin_lock_bh(&nc_path->packet_list_lock); + list_add_tail(&nc_packet->list, &nc_path->packet_list); + spin_unlock_bh(&nc_path->packet_list_lock); + + return true; + } + + /** + * batadv_nc_skb_forward - try to code a packet or add it to the coding packet + * buffer + * @skb: data skb to forward + * @neigh_node: next hop to forward packet to + * @ethhdr: pointer to the ethernet header inside the skb + * + * Returns true if the skb was consumed (encoded packet sent) or false otherwise + */ + bool batadv_nc_skb_forward(struct sk_buff *skb, + struct batadv_neigh_node *neigh_node, + struct ethhdr *ethhdr) + { + const struct net_device *netdev = neigh_node->if_incoming->soft_iface; + struct batadv_priv *bat_priv = netdev_priv(netdev); + struct batadv_unicast_packet *packet; + struct batadv_nc_path *nc_path; + __be32 packet_id; + u8 *payload; + + /* Check if network coding is enabled */ + if (!atomic_read(&bat_priv->network_coding)) + goto out; + + /* We only handle unicast packets */ + payload = skb_network_header(skb); + packet = (struct batadv_unicast_packet *)payload; + if (packet->header.packet_type != BATADV_UNICAST) + goto out; + + /* Try to find a coding opportunity and send the skb if one is found */ + if (batadv_nc_skb_dst_search(skb, neigh_node,
ethhdr)) + return true; + + /* Find or create a nc_path for this src-dst pair */ + nc_path = batadv_nc_get_path(bat_priv, + bat_priv->nc.coding_hash, + ethhdr->h_source, + neigh_node->addr); + + if (!nc_path) + goto out; + + /* Add skb to nc_path */ + packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet)); + if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id)) + goto free_nc_path; + + /* Packet is consumed */ + return true; + + free_nc_path: + batadv_nc_path_free_ref(nc_path); + out: + /* Packet is not consumed */ + return false; + } + + /** + * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used + * when decoding coded packets + * @bat_priv: the bat priv with all the soft interface information + * @skb: data skb to store + */ + void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv, + struct sk_buff *skb) + { + struct batadv_unicast_packet *packet; + struct batadv_nc_path *nc_path; + struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + __be32 packet_id; + u8 *payload; + + /* Check if network coding is enabled */ + if (!atomic_read(&bat_priv->network_coding)) + goto out; + + /* Check for supported packet type */ + payload = skb_network_header(skb); + packet = (struct batadv_unicast_packet *)payload; + if (packet->header.packet_type != BATADV_UNICAST) + goto out; + + /* Find existing nc_path or create a new one */ + nc_path = batadv_nc_get_path(bat_priv, + bat_priv->nc.decoding_hash, + ethhdr->h_source, + ethhdr->h_dest); + + if (!nc_path) + goto out; + + /* Clone skb and adjust skb->data to point at batman header */ + skb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb)) + goto free_nc_path; + + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) + goto free_skb; + + if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN))) + goto free_skb; + + /* Add skb to nc_path */ + packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet)); + if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id)) + goto free_skb; + + batadv_inc_counter(bat_priv, BATADV_CNT_NC_BUFFER); + return; + + free_skb: + kfree_skb(skb); + free_nc_path: + batadv_nc_path_free_ref(nc_path); + out: + return; + } + + /** + * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet + * should be saved in the decoding buffer and, if so, store it there + * @bat_priv: the bat priv with all the soft interface information + * @skb: unicast skb to store + */ + void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv, + struct sk_buff *skb) + { + struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + - if (batadv_is_my_mac(ethhdr->h_dest)) ++ if (batadv_is_my_mac(bat_priv, ethhdr->h_dest)) + return; + + /* Set data pointer to MAC header to mimic packets from our tx path */ + skb_push(skb, ETH_HLEN); + + batadv_nc_skb_store_for_decoding(bat_priv, skb); + } + + /** + * batadv_nc_skb_decode_packet - decode given skb using the decode data stored + * in nc_packet ++ * @bat_priv: the bat priv with all the soft interface information + * @skb: unicast skb to decode + * @nc_packet: decode data needed to decode the skb + * + * Returns pointer to decoded unicast packet if the packet was decoded or NULL + * in case of an error.
+ */ + static struct batadv_unicast_packet * -batadv_nc_skb_decode_packet(struct sk_buff *skb, ++batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, + struct batadv_nc_packet *nc_packet) + { + const int h_size = sizeof(struct batadv_unicast_packet); + const int h_diff = sizeof(struct batadv_coded_packet) - h_size; + struct batadv_unicast_packet *unicast_packet; + struct batadv_coded_packet coded_packet_tmp; + struct ethhdr *ethhdr, ethhdr_tmp; + uint8_t *orig_dest, ttl, ttvn; + unsigned int coding_len; + + /* Save headers temporarily */ + memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp)); + memcpy(&ethhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp)); + + if (skb_cow(skb, 0) < 0) + return NULL; + + if (unlikely(!skb_pull_rcsum(skb, h_diff))) + return NULL; + + /* Data points to batman header, so set mac header 14 bytes before + * and the network header to data + */ + skb_set_mac_header(skb, -ETH_HLEN); + skb_reset_network_header(skb); + + /* Reconstruct original mac header */ + ethhdr = (struct ethhdr *)skb_mac_header(skb); + memcpy(ethhdr, &ethhdr_tmp, sizeof(*ethhdr)); + + /* Select the correct unicast header information based on the location + * of our mac address in the coded_packet header + */ - if (batadv_is_my_mac(coded_packet_tmp.second_dest)) { ++ if (batadv_is_my_mac(bat_priv, coded_packet_tmp.second_dest)) { + /* If we are the second destination the packet was overheard, + * so the Ethernet address must be copied to h_dest and + * pkt_type changed from PACKET_OTHERHOST to PACKET_HOST + */ + memcpy(ethhdr->h_dest, coded_packet_tmp.second_dest, ETH_ALEN); + skb->pkt_type = PACKET_HOST; + + orig_dest = coded_packet_tmp.second_orig_dest; + ttl = coded_packet_tmp.second_ttl; + ttvn = coded_packet_tmp.second_ttvn; + } else { + orig_dest = coded_packet_tmp.first_orig_dest; + ttl = coded_packet_tmp.header.ttl; + ttvn = coded_packet_tmp.first_ttvn; + } + + coding_len = ntohs(coded_packet_tmp.coded_len); + + if (coding_len > skb->len) + return NULL; + + /* Here the magic is reversed: + * extract the missing packet from the received coded packet + */ + batadv_nc_memxor(skb->data + h_size, + nc_packet->skb->data + h_size, + coding_len); + + /* Resize decoded skb if decoded with larger packet */ + if (nc_packet->skb->len > coding_len + h_size) + pskb_trim_rcsum(skb, coding_len + h_size); + + /* Create decoded unicast packet */ + unicast_packet = (struct batadv_unicast_packet *)skb->data; + unicast_packet->header.packet_type = BATADV_UNICAST; + unicast_packet->header.version = BATADV_COMPAT_VERSION; + unicast_packet->header.ttl = ttl; + memcpy(unicast_packet->dest, orig_dest, ETH_ALEN); + unicast_packet->ttvn = ttvn; + + batadv_nc_packet_free(nc_packet); + return unicast_packet; + } + + /** + * batadv_nc_find_decoding_packet - search through buffered decoding data to + * find the data needed to decode the coded packet + * @bat_priv: the bat priv with all the soft interface information + * @ethhdr: pointer to the ethernet header inside the coded packet + * @coded: coded packet we try to find decode data for + * + * Returns pointer to nc packet if the needed data was found or NULL otherwise.
+ */ + static struct batadv_nc_packet * + batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv, + struct ethhdr *ethhdr, + struct batadv_coded_packet *coded) + { + struct batadv_hashtable *hash = bat_priv->nc.decoding_hash; + struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL; + struct batadv_nc_path *nc_path, nc_path_key; + uint8_t *dest, *source; + __be32 packet_id; + int index; + + if (!hash) + return NULL; + + /* Select the correct packet id based on the location of our mac-addr */ + dest = ethhdr->h_source; - if (!batadv_is_my_mac(coded->second_dest)) { ++ if (!batadv_is_my_mac(bat_priv, coded->second_dest)) { + source = coded->second_source; + packet_id = coded->second_crc; + } else { + source = coded->first_source; + packet_id = coded->first_crc; + } + + batadv_nc_hash_key_gen(&nc_path_key, source, dest); + index = batadv_nc_hash_choose(&nc_path_key, hash->size); + + /* Search for matching coding path */ + rcu_read_lock(); + hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) { + /* Find matching nc_packet */ + spin_lock_bh(&nc_path->packet_list_lock); + list_for_each_entry(tmp_nc_packet, + &nc_path->packet_list, list) { + if (packet_id == tmp_nc_packet->packet_id) { + list_del(&tmp_nc_packet->list); + + nc_packet = tmp_nc_packet; + break; + } + } + spin_unlock_bh(&nc_path->packet_list_lock); + + if (nc_packet) + break; + } + rcu_read_unlock(); + + if (!nc_packet) + batadv_dbg(BATADV_DBG_NC, bat_priv, + "No decoding packet found for %u\n", packet_id); + + return nc_packet; + } + + /** + * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the + * resulting unicast packet + * @skb: incoming coded packet + * @recv_if: pointer to interface this packet was received on + */ + static int batadv_nc_recv_coded_packet(struct sk_buff *skb, + struct batadv_hard_iface *recv_if) + { + struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_unicast_packet *unicast_packet; + struct batadv_coded_packet *coded_packet; + struct batadv_nc_packet *nc_packet; + struct ethhdr *ethhdr; + int hdr_size = sizeof(*coded_packet); + + /* Check if network coding is enabled */ + if (!atomic_read(&bat_priv->network_coding)) + return NET_RX_DROP; + + /* Make sure we can access (and remove) header */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return NET_RX_DROP; + + coded_packet = (struct batadv_coded_packet *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* Verify frame is destined for us */ - if (!batadv_is_my_mac(ethhdr->h_dest) && - !batadv_is_my_mac(coded_packet->second_dest)) ++ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) && ++ !batadv_is_my_mac(bat_priv, coded_packet->second_dest)) + return NET_RX_DROP; + + /* Update stat counter */ - if (batadv_is_my_mac(coded_packet->second_dest)) ++ if (batadv_is_my_mac(bat_priv, coded_packet->second_dest)) + batadv_inc_counter(bat_priv, BATADV_CNT_NC_SNIFFED); + + nc_packet = batadv_nc_find_decoding_packet(bat_priv, ethhdr, + coded_packet); + if (!nc_packet) { + batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED); + return NET_RX_DROP; + } + + /* Make skbs linear, because decoding accesses the entire buffer */ + if (skb_linearize(skb) < 0) + goto free_nc_packet; + + if (skb_linearize(nc_packet->skb) < 0) + goto free_nc_packet; + + /* Decode the packet */ - unicast_packet = batadv_nc_skb_decode_packet(skb, nc_packet); ++ unicast_packet = batadv_nc_skb_decode_packet(bat_priv, skb, nc_packet); + if (!unicast_packet) { + batadv_inc_counter(bat_priv,
BATADV_CNT_NC_DECODE_FAILED); + goto free_nc_packet; + } + + /* Mark packet as decoded to do correct recoding when forwarding */ + BATADV_SKB_CB(skb)->decoded = true; + batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE); + batadv_add_counter(bat_priv, BATADV_CNT_NC_DECODE_BYTES, + skb->len + ETH_HLEN); + return batadv_recv_unicast_packet(skb, recv_if); + + free_nc_packet: + batadv_nc_packet_free(nc_packet); + return NET_RX_DROP; + } + + /** + * batadv_nc_free - clean up network coding memory + * @bat_priv: the bat priv with all the soft interface information + */ + void batadv_nc_free(struct batadv_priv *bat_priv) + { + batadv_recv_handler_unregister(BATADV_CODED); + cancel_delayed_work_sync(&bat_priv->nc.work); + + batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL); + batadv_hash_destroy(bat_priv->nc.coding_hash); + batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL); + batadv_hash_destroy(bat_priv->nc.decoding_hash); + } + + /** + * batadv_nc_nodes_seq_print_text - print the nc node information + * @seq: seq file to print on + * @offset: not used + */ + int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset) + { + struct net_device *net_dev = (struct net_device *)seq->private; + struct batadv_priv *bat_priv = netdev_priv(net_dev); + struct batadv_hashtable *hash = bat_priv->orig_hash; + struct batadv_hard_iface *primary_if; + struct hlist_head *head; + struct batadv_orig_node *orig_node; + struct batadv_nc_node *nc_node; + int i; + + primary_if = batadv_seq_print_text_primary_if_get(seq); + if (!primary_if) + goto out; + + /* Traverse list of originators */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + /* For each orig_node in this bin */ + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) { + seq_printf(seq, "Node: %pM\n", orig_node->orig); + + seq_puts(seq, " Ingoing: "); + /* For each in_nc_node to this orig_node */ + list_for_each_entry_rcu(nc_node, + &orig_node->in_coding_list, + list) + seq_printf(seq, "%pM ", + nc_node->addr); + seq_puts(seq, "\n"); + + seq_puts(seq, " Outgoing: "); + /* For each out_nc_node to this orig_node */ + list_for_each_entry_rcu(nc_node, + &orig_node->out_coding_list, + list) + seq_printf(seq, "%pM ", + nc_node->addr); + seq_puts(seq, "\n\n"); + } + rcu_read_unlock(); + } + + out: + if (primary_if) + batadv_hardif_free_ref(primary_if); + return 0; + } + + /** + * batadv_nc_init_debugfs - create nc folder and related files in debugfs + * @bat_priv: the bat priv with all the soft interface information + */ + int batadv_nc_init_debugfs(struct batadv_priv *bat_priv) + { + struct dentry *nc_dir, *file; + + nc_dir = debugfs_create_dir("nc", bat_priv->debug_dir); + if (!nc_dir) + goto out; + + file = debugfs_create_u8("min_tq", S_IRUGO | S_IWUSR, nc_dir, + &bat_priv->nc.min_tq); + if (!file) + goto out; + + file = debugfs_create_u32("max_fwd_delay", S_IRUGO | S_IWUSR, nc_dir, + &bat_priv->nc.max_fwd_delay); + if (!file) + goto out; + + file = debugfs_create_u32("max_buffer_time", S_IRUGO | S_IWUSR, nc_dir, + &bat_priv->nc.max_buffer_time); + if (!file) + goto out; + + return 0; + + out: + return -ENOMEM; + } diff --combined net/batman-adv/routing.c index 319f290,8f88967..8e0bc82 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -29,6 -29,7 +29,7 @@@ #include "unicast.h" #include "bridge_loop_avoidance.h" #include "distributed-arp-table.h" + #include "network-coding.h"
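Before moving on to routing.c: the decode path above (batadv_nc_skb_decode_packet() with its batadv_nc_memxor() call) rests on the XOR identity (a ^ b) ^ b = a. A coded payload is the byte-wise XOR of two unicast payloads, so a receiver that buffered one of them recovers the other by XOR-ing its copy back out. A minimal stand-alone sketch of that relation; nc_memxor() below is a stand-in written for this example, on the assumption that batadv_nc_memxor() (defined outside the hunks shown here) performs a plain byte-wise XOR:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Byte-wise XOR of src into dst, the behavior assumed for
 * batadv_nc_memxor(dst, src, len) in the decode hunk above.
 */
static void nc_memxor(unsigned char *dst, const unsigned char *src,
		      size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	unsigned char a[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char b[4] = { 0x01, 0x02, 0x03, 0x04 };
	unsigned char coded[4];

	/* sender: one transmission carries both payloads */
	memcpy(coded, a, sizeof(coded));
	nc_memxor(coded, b, sizeof(coded));

	/* receiver that buffered b: XOR it back out to recover a */
	nc_memxor(coded, b, sizeof(coded));
	assert(memcmp(coded, a, sizeof(coded)) == 0);
	return 0;
}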
static int batadv_route_unicast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if); @@@ -402,7 -403,7 +403,7 @@@ int batadv_recv_icmp_packet(struct sk_b goto out;
/* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) goto out;
icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; @@@ -416,7 -417,7 +417,7 @@@ }
/* packet for me */ - if (batadv_is_my_mac(icmp_packet->dst)) + if (batadv_is_my_mac(bat_priv, icmp_packet->dst)) return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */ @@@ -548,28 -549,37 +549,39 @@@ batadv_find_ifalter_router(struct batad return router; }
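The next hunk replaces batadv_check_unicast_packet()'s bare -1 with distinct errno values so callers can tell "malformed" apart from "not addressed to me". The batadv_recv_unicast_packet() hunk further down in this file uses exactly that distinction; condensed from the diff itself, not new code:

	int check;

	/* function returns -EREMOTE for promiscuous packets */
	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);

	/* Even though the packet is not for us, we might save it to use
	 * for decoding a later received coded packet
	 */
	if (check == -EREMOTE)
		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);

	if (check < 0)
		return NET_RX_DROP;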
+ /** + * batadv_check_unicast_packet - Check for malformed unicast packets ++ * @bat_priv: the bat priv with all the soft interface information + * @skb: packet to check + * @hdr_size: size of header to pull + * + * Check for short header and bad addresses in the given packet. Returns a + * negative value when the check fails and 0 otherwise. The negative value + * depends on the reason: -ENODATA for bad header, -EBADR for broadcast + * destination or source, and -EREMOTE for non-local (other host) destination. + */ -static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) +static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, + struct sk_buff *skb, int hdr_size) { struct ethhdr *ethhdr;
/* drop packet if it does not have the necessary minimum size */ if (unlikely(!pskb_may_pull(skb, hdr_size))) - return -1; + return -ENODATA;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */ if (is_broadcast_ether_addr(ethhdr->h_dest)) - return -1; + return -EBADR;
/* packet with broadcast sender address */ if (is_broadcast_ether_addr(ethhdr->h_source)) - return -1; + return -EBADR;
/* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) - return -1; + return -EREMOTE;
return 0; } @@@ -583,7 -593,7 +595,7 @@@ int batadv_recv_tt_query(struct sk_buf char tt_flag; size_t packet_size;
- if (batadv_check_unicast_packet(skb, hdr_size) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) return NET_RX_DROP;
/* I may need to modify it */ @@@ -615,7 -625,7 +627,7 @@@ case BATADV_TT_RESPONSE: batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
- if (batadv_is_my_mac(tt_query->dst)) { + if (batadv_is_my_mac(bat_priv, tt_query->dst)) { /* packet needs to be linearized to access the TT * changes */ @@@ -658,15 -668,14 +670,15 @@@ int batadv_recv_roam_adv(struct sk_buf struct batadv_roam_adv_packet *roam_adv_packet; struct batadv_orig_node *orig_node;
- if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, + sizeof(*roam_adv_packet)) < 0) goto out;
batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
- if (!batadv_is_my_mac(roam_adv_packet->dst)) + if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst)) return batadv_route_unicast_packet(skb, recv_if);
/* check if it is a backbone gateway. we don't accept @@@ -852,15 -861,18 +864,18 @@@ static int batadv_route_unicast_packet( /* decrement ttl */ unicast_packet->header.ttl--;
- /* Update stats counter */ - batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); - batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, - skb->len + ETH_HLEN); - - /* route it */ - if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) + /* network code packet if possible */ + if (batadv_nc_skb_forward(skb, neigh_node, ethhdr)) { + ret = NET_RX_SUCCESS; + } else if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) { ret = NET_RX_SUCCESS;
+ /* Update stats counter */ + batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); + batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, + skb->len + ETH_HLEN); + } + out: if (neigh_node) batadv_neigh_node_free_ref(neigh_node); @@@ -969,7 -981,7 +984,7 @@@ static int batadv_check_unicast_ttvn(st * last time) the packet had an updated information or not */ curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); - if (!batadv_is_my_mac(unicast_packet->dest)) { + if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) { orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest); /* if it is not possible to find the orig_node representing the @@@ -1035,7 -1047,7 +1050,7 @@@ int batadv_recv_unicast_packet(struct s struct batadv_unicast_4addr_packet *unicast_4addr_packet; uint8_t *orig_addr; struct batadv_orig_node *orig_node = NULL; - int hdr_size = sizeof(*unicast_packet); + int check, hdr_size = sizeof(*unicast_packet); bool is4addr;
unicast_packet = (struct batadv_unicast_packet *)skb->data; @@@ -1046,14 -1058,23 +1061,23 @@@ if (is4addr) hdr_size = sizeof(*unicast_4addr_packet);
- if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) + /* function returns -EREMOTE for promiscuous packets */ - check = batadv_check_unicast_packet(skb, hdr_size); ++ check = batadv_check_unicast_packet(bat_priv, skb, hdr_size); + + /* Even though the packet is not for us, we might save it to use for + * decoding a later received coded packet + */ + if (check == -EREMOTE) + batadv_nc_skb_store_sniffed_unicast(bat_priv, skb); + + if (check < 0) return NET_RX_DROP;
if (!batadv_check_unicast_ttvn(bat_priv, skb)) return NET_RX_DROP;
/* packet for me */ - if (batadv_is_my_mac(unicast_packet->dest)) { + if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { if (is4addr) { batadv_dat_inc_counter(bat_priv, unicast_4addr_packet->subtype); @@@ -1090,7 -1111,7 +1114,7 @@@ int batadv_recv_ucast_frag_packet(struc struct sk_buff *new_skb = NULL; int ret;
- if (batadv_check_unicast_packet(skb, hdr_size) < 0) + if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) return NET_RX_DROP;
if (!batadv_check_unicast_ttvn(bat_priv, skb)) @@@ -1099,7 -1120,7 +1123,7 @@@ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
/* packet for me */ - if (batadv_is_my_mac(unicast_packet->dest)) { + if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP) @@@ -1153,13 -1174,13 +1177,13 @@@ int batadv_recv_bcast_packet(struct sk_ goto out;
/* ignore broadcasts sent by myself */ - if (batadv_is_my_mac(ethhdr->h_source)) + if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto out;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */ - if (batadv_is_my_mac(bcast_packet->orig)) + if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) goto out;
if (bcast_packet->header.ttl < 2) @@@ -1245,14 -1266,14 +1269,14 @@@ int batadv_recv_vis_packet(struct sk_bu ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) return NET_RX_DROP;
/* ignore own packets */ - if (batadv_is_my_mac(vis_packet->vis_orig)) + if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig)) return NET_RX_DROP;
- if (batadv_is_my_mac(vis_packet->sender_orig)) + if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig)) return NET_RX_DROP;
switch (vis_packet->vis_type) { diff --combined net/batman-adv/translation-table.c index 7abee19,9322320..5e89dee --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -385,25 -385,19 +385,19 @@@ static void batadv_tt_prepare_packet_bu int *packet_buff_len, int min_packet_len) { - struct batadv_hard_iface *primary_if; int req_len;
- primary_if = batadv_primary_if_get_selected(bat_priv); - req_len = min_packet_len; req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
/* if we have too many changes for one packet don't send any * and wait for the tt table request which will be fragmented */ - if ((!primary_if) || (req_len > primary_if->soft_iface->mtu)) + if (req_len > bat_priv->soft_iface->mtu) req_len = min_packet_len;
batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, min_packet_len, req_len); - - if (primary_if) - batadv_hardif_free_ref(primary_if); }
static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv, @@@ -908,7 -902,7 +902,7 @@@ out_remove /* remove address from local hash if present */ local_flags = batadv_tt_local_remove(bat_priv, tt_addr, "global tt received", - !!(flags & BATADV_TT_CLIENT_ROAM)); + flags & BATADV_TT_CLIENT_ROAM); tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
if (!(flags & BATADV_TT_CLIENT_ROAM)) @@@ -1580,7 -1574,7 +1574,7 @@@ static int batadv_tt_global_valid(cons static struct sk_buff * batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, struct batadv_hashtable *hash, - struct batadv_hard_iface *primary_if, + struct batadv_priv *bat_priv, int (*valid_cb)(const void *, const void *), void *cb_data) { @@@ -1594,8 -1588,8 +1588,8 @@@ uint32_t i; size_t len;
- if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { - tt_len = primary_if->soft_iface->mtu - tt_query_size; + if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) { + tt_len = bat_priv->soft_iface->mtu - tt_query_size; tt_len -= tt_len % sizeof(struct batadv_tt_change); } tt_tot = tt_len / sizeof(struct batadv_tt_change); @@@ -1715,7 -1709,6 +1709,6 @@@ batadv_send_other_tt_response(struct ba { struct batadv_orig_node *req_dst_orig_node; struct batadv_orig_node *res_dst_orig_node = NULL; - struct batadv_hard_iface *primary_if = NULL; uint8_t orig_ttvn, req_ttvn, ttvn; int ret = false; unsigned char *tt_buff; @@@ -1740,10 -1733,6 +1733,6 @@@ if (!res_dst_orig_node) goto out;
- primary_if = batadv_primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); req_ttvn = tt_request->ttvn;
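As a worked example of the clamping in batadv_tt_response_fill_table() above, with illustrative sizes only (the real header and struct batadv_tt_change sizes are not visible in this diff): assume a 1500-byte MTU, a 20-byte query header and 12-byte change entries.

	unsigned int mtu = 1500, tt_query_size = 20, change_size = 12;
	unsigned int tt_len = 2000, tt_tot;

	if (tt_query_size + tt_len > mtu) {
		tt_len = mtu - tt_query_size;	/* 1480 */
		tt_len -= tt_len % change_size;	/* 1476: whole entries only */
	}
	tt_tot = tt_len / change_size;		/* 123 TT changes fit */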
@@@ -1791,7 -1780,7 +1780,7 @@@
skb = batadv_tt_response_fill_table(tt_len, ttvn, bat_priv->tt.global_hash, - primary_if, + bat_priv, batadv_tt_global_valid, req_dst_orig_node); if (!skb) @@@ -1828,8 -1817,6 +1817,6 @@@ out batadv_orig_node_free_ref(res_dst_orig_node); if (req_dst_orig_node) batadv_orig_node_free_ref(req_dst_orig_node); - if (primary_if) - batadv_hardif_free_ref(primary_if); if (!ret) kfree_skb(skb); return ret; @@@ -1907,7 -1894,7 +1894,7 @@@ batadv_send_my_tt_response(struct batad
skb = batadv_tt_response_fill_table(tt_len, ttvn, bat_priv->tt.local_hash, - primary_if, + bat_priv, batadv_tt_local_valid_entry, NULL); if (!skb) @@@ -1953,7 -1940,7 +1940,7 @@@ out bool batadv_send_tt_response(struct batadv_priv *bat_priv, struct batadv_tt_query_packet *tt_request) { - if (batadv_is_my_mac(tt_request->dst)) { + if (batadv_is_my_mac(bat_priv, tt_request->dst)) { /* don't answer backbone gws! */ if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) return true; @@@ -2528,7 -2515,7 +2515,7 @@@ bool batadv_tt_global_client_is_roaming if (!tt_global_entry) goto out;
- ret = !!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM); + ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM; batadv_tt_global_entry_free_ref(tt_global_entry); out: return ret; diff --combined net/batman-adv/vis.c index 6a1e646,962ccf3..1625e57 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@@ -149,7 -149,7 +149,7 @@@ static void batadv_vis_data_read_prim_s
hlist_for_each_entry(entry, if_list, list) { if (entry->primary) - seq_printf(seq, "PRIMARY, "); + seq_puts(seq, "PRIMARY, "); else seq_printf(seq, "SEC %pM, ", entry->addr); } @@@ -207,7 -207,7 +207,7 @@@ static void batadv_vis_data_read_entrie if (batadv_compare_eth(entry->addr, packet->vis_orig)) batadv_vis_data_read_prim_sec(seq, list);
- seq_printf(seq, "\n"); + seq_puts(seq, "\n"); } }
@@@ -477,7 -477,7 +477,7 @@@ void batadv_receive_client_update_packe
/* Are we the target for this VIS packet? */ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && - batadv_is_my_mac(vis_packet->target_orig)) + batadv_is_my_mac(bat_priv, vis_packet->target_orig)) are_target = 1;
spin_lock_bh(&bat_priv->vis.hash_lock); @@@ -496,7 -496,7 +496,7 @@@ batadv_send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */ - } else if (!batadv_is_my_mac(packet->target_orig)) { + } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) { batadv_send_list_add(bat_priv, info); }
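The change threaded through every batman-adv hunk above is that batadv_is_my_mac() now takes the receiving soft interface's bat_priv, so "is this my address?" is answered per mesh instead of across every batman interface on the host. The helper itself lives in main.c and is not part of this diff; the sketch below is an assumption about its shape, kept only to show what the extra argument enables:

/* Sketch, not code from this diff: walk the global hard-interface list
 * and skip entries that do not belong to @bat_priv's soft interface.
 */
static bool is_my_mac_sketch(struct batadv_priv *bat_priv,
			     const uint8_t *addr)
{
	struct batadv_hard_iface *hard_iface;
	bool is_my_mac = false;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		/* the new bat_priv argument makes this scoping possible */
		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			is_my_mac = true;
			break;
		}
	}
	rcu_read_unlock();
	return is_my_mac;
}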
diff --combined net/bluetooth/af_bluetooth.c index d1b3d15,e5338f7..9096137 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@@ -92,23 -92,14 +92,14 @@@ int bt_sock_register(int proto, const s } EXPORT_SYMBOL(bt_sock_register);
- int bt_sock_unregister(int proto) + void bt_sock_unregister(int proto) { - int err = 0; - if (proto < 0 || proto >= BT_MAX_PROTO) - return -EINVAL; + return;
write_lock(&bt_proto_lock); - - if (!bt_proto[proto]) - err = -ENOENT; - else - bt_proto[proto] = NULL; - + bt_proto[proto] = NULL; write_unlock(&bt_proto_lock); - - return err; } EXPORT_SYMBOL(bt_sock_unregister);
@@@ -422,7 -413,8 +413,8 @@@ unsigned int bt_sock_poll(struct file * return bt_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR; + mask |= POLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; @@@ -617,7 -609,7 +609,7 @@@ static int bt_seq_open(struct inode *in struct bt_sock_list *sk_list; struct bt_seq_state *s;
- sk_list = PDE(inode)->data; + sk_list = PDE_DATA(inode); s = __seq_open_private(file, &bt_seq_ops, sizeof(struct bt_seq_state)); if (!s) @@@ -627,21 -619,26 +619,21 @@@ return 0; }
-int bt_procfs_init(struct module* module, struct net *net, const char *name, +static const struct file_operations bt_fops = { + .open = bt_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list* sk_list, int (* seq_show)(struct seq_file *, void *)) { - struct proc_dir_entry * pde; - sk_list->custom_seq_show = seq_show;
- sk_list->fops.owner = module; - sk_list->fops.open = bt_seq_open; - sk_list->fops.read = seq_read; - sk_list->fops.llseek = seq_lseek; - sk_list->fops.release = seq_release_private; - - pde = proc_create(name, 0, net->proc_net, &sk_list->fops); - if (!pde) + if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list)) return -ENOMEM; - - pde->data = sk_list; - return 0; }
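The bt_procfs_init() rewrite above follows the procfs conversion visible throughout this merge: the private pointer is handed to proc_create_data() at creation time and read back with PDE_DATA(inode) in ->open, instead of poking pde->data after proc_create(). A generic sketch of the pairing; every name here is a placeholder, not a symbol from this diff:

static LIST_HEAD(my_list);

static int my_show(struct seq_file *m, void *v)
{
	seq_printf(m, "list at %p\n", m->private);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	/* the pointer registered below comes back via PDE_DATA() */
	return single_open(file, my_show, PDE_DATA(inode));
}

static const struct file_operations my_fops = {
	.owner	 = THIS_MODULE,
	.open	 = my_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int my_procfs_init(struct net *net)
{
	if (!proc_create_data("my_entry", 0, net->proc_net, &my_fops,
			      &my_list))
		return -ENOMEM;
	return 0;
}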
@@@ -650,7 -647,7 +642,7 @@@ void bt_procfs_cleanup(struct net *net remove_proc_entry(name, net->proc_net); } #else -int bt_procfs_init(struct module* module, struct net *net, const char *name, +int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list* sk_list, int (* seq_show)(struct seq_file *, void *)) { diff --combined net/bluetooth/bnep/sock.c index d4686fb,5b1c04e..5f05129 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@@ -234,7 -234,7 +234,7 @@@ int __init bnep_sock_init(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL); + err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create BNEP proc file"); bt_sock_unregister(BTPROTO_BNEP); @@@ -253,8 -253,6 +253,6 @@@ error void __exit bnep_sock_cleanup(void) { bt_procfs_cleanup(&init_net, "bnep"); - if (bt_sock_unregister(BTPROTO_BNEP) < 0) - BT_ERR("Can't unregister BNEP socket"); - + bt_sock_unregister(BTPROTO_BNEP); proto_unregister(&bnep_proto); } diff --combined net/bluetooth/cmtp/sock.c index 03f26bf,58d9ede..d82787d --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c @@@ -245,7 -245,7 +245,7 @@@ int cmtp_init_sockets(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL); + err = bt_procfs_init(&init_net, "cmtp", &cmtp_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create CMTP proc file"); bt_sock_unregister(BTPROTO_HIDP); @@@ -264,8 -264,6 +264,6 @@@ error void cmtp_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "cmtp"); - if (bt_sock_unregister(BTPROTO_CMTP) < 0) - BT_ERR("Can't unregister CMTP socket"); - + bt_sock_unregister(BTPROTO_CMTP); proto_unregister(&cmtp_proto); } diff --combined net/bluetooth/hci_sock.c index 6ad2395,aa4354f..9bd7d95 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@@ -854,6 -854,11 +854,11 @@@ static int hci_sock_sendmsg(struct kioc skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else { + /* Stand-alone HCI commands must be flaged as + * single-command requests. + */ + bt_cb(skb)->req.start = true; + skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } @@@ -1102,7 -1107,7 +1107,7 @@@ int __init hci_sock_init(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL); + err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create HCI proc file"); bt_sock_unregister(BTPROTO_HCI); @@@ -1121,8 -1126,6 +1126,6 @@@ error void hci_sock_cleanup(void) { bt_procfs_cleanup(&init_net, "hci"); - if (bt_sock_unregister(BTPROTO_HCI) < 0) - BT_ERR("HCI socket unregistration failed"); - + bt_sock_unregister(BTPROTO_HCI); proto_unregister(&hci_sk_proto); } diff --combined net/bluetooth/hidp/sock.c index e7e04d4,5d0f1ca..c7df470 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@@ -284,7 -284,7 +284,7 @@@ int __init hidp_init_sockets(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL); + err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create HIDP proc file"); bt_sock_unregister(BTPROTO_HIDP); @@@ -304,8 -304,6 +304,6 @@@ error void __exit hidp_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "hidp"); - if (bt_sock_unregister(BTPROTO_HIDP) < 0) - BT_ERR("Can't unregister HIDP socket"); - + bt_sock_unregister(BTPROTO_HIDP); proto_unregister(&hidp_proto); } diff --combined net/bluetooth/l2cap_sock.c index fe15960,7f97049..e454b7b --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@@ -1292,7 -1292,7 +1292,7 @@@ int __init l2cap_init_sockets(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, + err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create L2CAP proc file"); @@@ -1312,8 -1312,6 +1312,6 @@@ error void l2cap_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "l2cap"); - if (bt_sock_unregister(BTPROTO_L2CAP) < 0) - BT_ERR("L2CAP socket unregistration failed"); - + bt_sock_unregister(BTPROTO_L2CAP); proto_unregister(&l2cap_proto); } diff --combined net/bluetooth/rfcomm/sock.c index 4b6eeaf,a8638b5..30b3721 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@@ -1037,7 -1037,7 +1037,7 @@@ int __init rfcomm_init_sockets(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL); + err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create RFCOMM proc file"); bt_sock_unregister(BTPROTO_RFCOMM); @@@ -1066,8 -1066,7 +1066,7 @@@ void __exit rfcomm_cleanup_sockets(void
debugfs_remove(rfcomm_sock_debugfs);
- if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) - BT_ERR("RFCOMM socket layer unregistration failed"); + bt_sock_unregister(BTPROTO_RFCOMM);
proto_unregister(&rfcomm_proto); } diff --combined net/bluetooth/sco.c index 37bc712,2c80553..5434518 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@@ -1084,7 -1084,7 +1084,7 @@@ int __init sco_init(void goto error; }
- err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL); + err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create SCO proc file"); bt_sock_unregister(BTPROTO_SCO); @@@ -1113,8 -1113,7 +1113,7 @@@ void __exit sco_exit(void
debugfs_remove(sco_debugfs);
- if (bt_sock_unregister(BTPROTO_SCO) < 0) - BT_ERR("SCO socket unregistration failed"); + bt_sock_unregister(BTPROTO_SCO);
proto_unregister(&sco_proto); } diff --combined net/bridge/br_if.c index 459dab2,f17fcb3..4cdba60 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@@ -67,8 -67,7 +67,8 @@@ void br_port_carrier_check(struct net_b struct net_device *dev = p->dev; struct net_bridge *br = p->br;
- if (netif_running(dev) && netif_oper_up(dev)) + if (!(p->flags & BR_ADMIN_COST) && + netif_running(dev) && netif_oper_up(dev)) p->path_cost = port_cost(dev);
if (!netif_running(br->dev)) @@@ -149,7 -148,6 +149,6 @@@ static void del_nbp(struct net_bridge_p dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev); - synchronize_net();
netdev_upper_dev_unlink(dev, br->dev);
diff --combined net/can/gw.c index 117814a,2dc619d..3ee690e --- a/net/can/gw.c +++ b/net/can/gw.c @@@ -466,7 -466,7 +466,7 @@@ static int cgw_notifier(struct notifier if (gwj->src.dev == dev || gwj->dst.dev == dev) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kfree(gwj); + kmem_cache_free(cgw_cache, gwj); } } } @@@ -778,8 -778,7 +778,7 @@@ static int cgw_parse_attr(struct nlmsgh return 0; }
- static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, - void *arg) + static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) { struct rtcanmsg *r; struct cgw_job *gwj; @@@ -864,11 -863,11 +863,11 @@@ static void cgw_remove_all_jobs(void hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kfree(gwj); + kmem_cache_free(cgw_cache, gwj); } }
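The kfree(gwj) -> kmem_cache_free(cgw_cache, gwj) conversions in this file (one more follows below) fix an allocator mismatch: cgw_job objects are carved from a dedicated slab cache and must be released back to the cache they came from. The generic pairing, sketched; only cgw_cache, "can_gw" and struct cgw_job correspond to this file, the helpers are illustrative:

static struct kmem_cache *cgw_cache __read_mostly;

static int cache_init(void)
{
	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);
	return cgw_cache ? 0 : -ENOMEM;
}

static struct cgw_job *job_alloc(void)
{
	return kmem_cache_alloc(cgw_cache, GFP_KERNEL);
}

static void job_free(struct cgw_job *gwj)
{
	/* must match the allocator: plain kfree() here would be a bug */
	kmem_cache_free(cgw_cache, gwj);
}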
- static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) { struct cgw_job *gwj = NULL; struct hlist_node *nx; @@@ -920,7 -919,7 +919,7 @@@
hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kfree(gwj); + kmem_cache_free(cgw_cache, gwj); err = 0; break; } diff --combined net/core/dev.c index b24ab0e9,3655ff9..f57acaf --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -2148,9 -2148,6 +2148,9 @@@ static void skb_warn_bad_offload(const struct net_device *dev = skb->dev; const char *driver = "";
+ if (!net_ratelimit()) + return; + if (dev && dev->dev.parent) driver = dev_driver_string(dev->dev.parent);
@@@ -2210,16 -2207,8 +2210,8 @@@ out } EXPORT_SYMBOL(skb_checksum_help);
- /** - * skb_mac_gso_segment - mac layer segmentation handler. - * @skb: buffer to segment - * @features: features for the output path (see dev->features) - */ - struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features) + __be16 skb_network_protocol(struct sk_buff *skb) { - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; __be16 type = skb->protocol; int vlan_depth = ETH_HLEN;
@@@ -2227,13 -2216,31 +2219,31 @@@ struct vlan_hdr *vh;
if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) - return ERR_PTR(-EINVAL); + return 0;
vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; }
+ return type; + } + + /** + * skb_mac_gso_segment - mac layer segmentation handler. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + */ + struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features) + { + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + __be16 type = skb_network_protocol(skb); + + if (unlikely(!type)) + return ERR_PTR(-EINVAL); + __skb_pull(skb, skb->mac_len);
rcu_read_lock(); @@@ -2400,24 -2407,12 +2410,12 @@@ static int dev_gso_segment(struct sk_bu return 0; }
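The newly factored-out skb_network_protocol() above walks through stacked 802.1Q tags to find the encapsulated ethertype, returning 0 (instead of the old ERR_PTR) when the frame is too short. The same walk over a raw frame buffer, as a stand-alone user-space sketch:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_P_8021Q 0x8100

/* Ethertype of a frame, skipping stacked 802.1Q tags; 0 on short frame */
static uint16_t frame_network_protocol(const uint8_t *frame, size_t len)
{
	size_t off = 12;	/* dst MAC (6) + src MAC (6) */
	uint16_t be, type;

	if (len < off + 2)
		return 0;
	memcpy(&be, frame + off, 2);
	type = ntohs(be);

	while (type == ETH_P_8021Q) {
		off += 4;	/* skip TPID (2) + TCI (2) */
		if (len < off + 2)
			return 0;
		memcpy(&be, frame + off, 2);
		type = ntohs(be);
	}
	return type;
}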
- static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) - { - return ((features & NETIF_F_GEN_CSUM) || - ((features & NETIF_F_V4_CSUM) && - protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_V6_CSUM) && - protocol == htons(ETH_P_IPV6)) || - ((features & NETIF_F_FCOE_CRC) && - protocol == htons(ETH_P_FCOE))); - } - static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; - features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; } @@@ -2592,6 -2587,7 +2590,7 @@@ static void qdisc_pkt_len_init(struct s */ if (shinfo->gso_size) { unsigned int hdr_len; + u16 gso_segs = shinfo->gso_segs;
/* mac layer + network layer */ hdr_len = skb_transport_header(skb) - skb_mac_header(skb); @@@ -2601,7 -2597,12 +2600,12 @@@ hdr_len += tcp_hdrlen(skb); else hdr_len += sizeof(struct udphdr); - qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len; + + if (shinfo->gso_type & SKB_GSO_DODGY) + gso_segs = DIV_ROUND_UP(skb->len - hdr_len, + shinfo->gso_size); + + qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len; } }
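The SKB_GSO_DODGY branch added above stops trusting a segment count supplied by an untrusted source (such as a virtio guest), since an inflated gso_segs would overcharge the qdisc byte accounting; the count is recomputed from the payload instead. With illustrative numbers only:

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	unsigned int skb_len = 65226, hdr_len = 66, gso_size = 1448;
	unsigned int gso_segs, pkt_len;

	gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);	/* 45 */
	pkt_len = skb_len + (gso_segs - 1) * hdr_len;		/* 68130 */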
@@@ -3329,7 -3330,7 +3333,7 @@@ EXPORT_SYMBOL_GPL(netdev_rx_handler_reg * netdev_rx_handler_unregister - unregister receive handler * @dev: device to unregister a handler from * - * Unregister a receive hander from a device. + * Unregister a receive handler from a device. * * The caller must hold the rtnl_mutex. */ @@@ -4066,6 -4067,9 +4070,9 @@@ void netif_napi_add(struct net_device * napi->gro_list = NULL; napi->skb = NULL; napi->poll = poll; + if (weight > NAPI_POLL_WEIGHT) + pr_err_once("netif_napi_add() called with weight %d on device %s\n", + weight, dev->name); napi->weight = weight; list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; @@@ -4927,20 -4931,25 +4934,25 @@@ static netdev_features_t netdev_fix_fea features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); }
- /* Fix illegal SG+CSUM combinations. */ - if ((features & NETIF_F_SG) && - !(features & NETIF_F_ALL_CSUM)) { - netdev_dbg(dev, - "Dropping NETIF_F_SG since no checksum feature.\n"); - features &= ~NETIF_F_SG; - } - /* TSO requires that SG is present as well. */ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); features &= ~NETIF_F_ALL_TSO; }
+ if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && + !(features & NETIF_F_IP_CSUM)) { + netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); + features &= ~NETIF_F_TSO; + features &= ~NETIF_F_TSO_ECN; + } + + if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && + !(features & NETIF_F_IPV6_CSUM)) { + netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); + features &= ~NETIF_F_TSO6; + } + /* TSO ECN requires that TSO is present as well. */ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) features &= ~NETIF_F_TSO_ECN; @@@ -5208,6 -5217,10 +5220,10 @@@ int register_netdevice(struct net_devic */ dev->vlan_features |= NETIF_F_HIGHDMA;
+ /* Make NETIF_F_SG inheritable to tunnel devices. + */ + dev->hw_enc_features |= NETIF_F_SG; + ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); ret = notifier_to_errno(ret); if (ret) diff --combined net/core/neighbour.c index 537301a,89a3a07..5c56b21 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@@ -39,21 -39,13 +39,13 @@@ #include <linux/string.h> #include <linux/log2.h>
+ #define DEBUG #define NEIGH_DEBUG 1 - - #define NEIGH_PRINTK(x...) printk(x) - #define NEIGH_NOPRINTK(x...) do { ; } while(0) - #define NEIGH_PRINTK1 NEIGH_NOPRINTK - #define NEIGH_PRINTK2 NEIGH_NOPRINTK - - #if NEIGH_DEBUG >= 1 - #undef NEIGH_PRINTK1 - #define NEIGH_PRINTK1 NEIGH_PRINTK - #endif - #if NEIGH_DEBUG >= 2 - #undef NEIGH_PRINTK2 - #define NEIGH_PRINTK2 NEIGH_PRINTK - #endif + #define neigh_dbg(level, fmt, ...) \ + do { \ + if (level <= NEIGH_DEBUG) \ + pr_debug(fmt, ##__VA_ARGS__); \ + } while (0)
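Two gates now control these messages: neigh_dbg() keeps the old compile-time NEIGH_DEBUG level check, while pr_debug() itself only emits anything when DEBUG is defined (hence the '#define DEBUG' added above) or when CONFIG_DYNAMIC_DEBUG enables the call site at run time. Usage throughout the rest of this file stays a one-liner, e.g. (both taken from the hunks below):

	neigh_dbg(2, "neigh %p is created\n", n);
	neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);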
#define PNEIGH_HASHMASK 0xF
@@@ -246,7 -238,7 +238,7 @@@ static void neigh_flush_dev(struct neig n->nud_state = NUD_NOARP; else n->nud_state = NUD_NONE; - NEIGH_PRINTK2("neigh %p is stray.\n", n); + neigh_dbg(2, "neigh %p is stray\n", n); } write_unlock(&n->lock); neigh_cleanup_and_release(n); @@@ -542,7 -534,7 +534,7 @@@ struct neighbour *__neigh_create(struc lockdep_is_held(&tbl->lock))); rcu_assign_pointer(nht->hash_buckets[hash_val], n); write_unlock_bh(&tbl->lock); - NEIGH_PRINTK2("neigh %p is created.\n", n); + neigh_dbg(2, "neigh %p is created\n", n); rc = n; out: return rc; @@@ -725,7 -717,7 +717,7 @@@ void neigh_destroy(struct neighbour *ne dev_put(dev); neigh_parms_put(neigh->parms);
- NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh); + neigh_dbg(2, "neigh %p is destroyed\n", neigh);
atomic_dec(&neigh->tbl->entries); kfree_rcu(neigh, rcu); @@@ -739,7 -731,7 +731,7 @@@ EXPORT_SYMBOL(neigh_destroy) */ static void neigh_suspect(struct neighbour *neigh) { - NEIGH_PRINTK2("neigh %p is suspected.\n", neigh); + neigh_dbg(2, "neigh %p is suspected\n", neigh);
neigh->output = neigh->ops->output; } @@@ -751,7 -743,7 +743,7 @@@ */ static void neigh_connect(struct neighbour *neigh) { - NEIGH_PRINTK2("neigh %p is connected.\n", neigh); + neigh_dbg(2, "neigh %p is connected\n", neigh);
neigh->output = neigh->ops->connected_output; } @@@ -852,7 -844,7 +844,7 @@@ static void neigh_invalidate(struct nei struct sk_buff *skb;
NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); - NEIGH_PRINTK2("neigh %p is failed.\n", neigh); + neigh_dbg(2, "neigh %p is failed\n", neigh); neigh->updated = jiffies;
/* It is very thin place. report_unreachable is very complicated @@@ -904,17 -896,17 +896,17 @@@ static void neigh_timer_handler(unsigne if (state & NUD_REACHABLE) { if (time_before_eq(now, neigh->confirmed + neigh->parms->reachable_time)) { - NEIGH_PRINTK2("neigh %p is still alive.\n", neigh); + neigh_dbg(2, "neigh %p is still alive\n", neigh); next = neigh->confirmed + neigh->parms->reachable_time; } else if (time_before_eq(now, neigh->used + neigh->parms->delay_probe_time)) { - NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); + neigh_dbg(2, "neigh %p is delayed\n", neigh); neigh->nud_state = NUD_DELAY; neigh->updated = jiffies; neigh_suspect(neigh); next = now + neigh->parms->delay_probe_time; } else { - NEIGH_PRINTK2("neigh %p is suspected.\n", neigh); + neigh_dbg(2, "neigh %p is suspected\n", neigh); neigh->nud_state = NUD_STALE; neigh->updated = jiffies; neigh_suspect(neigh); @@@ -923,14 -915,14 +915,14 @@@ } else if (state & NUD_DELAY) { if (time_before_eq(now, neigh->confirmed + neigh->parms->delay_probe_time)) { - NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh); + neigh_dbg(2, "neigh %p is now reachable\n", neigh); neigh->nud_state = NUD_REACHABLE; neigh->updated = jiffies; neigh_connect(neigh); notify = 1; next = neigh->confirmed + neigh->parms->reachable_time; } else { - NEIGH_PRINTK2("neigh %p is probed.\n", neigh); + neigh_dbg(2, "neigh %p is probed\n", neigh); neigh->nud_state = NUD_PROBE; neigh->updated = jiffies; atomic_set(&neigh->probes, 0); @@@ -997,7 -989,7 +989,7 @@@ int __neigh_event_send(struct neighbou return 1; } } else if (neigh->nud_state & NUD_STALE) { - NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); + neigh_dbg(2, "neigh %p is delayed\n", neigh); neigh->nud_state = NUD_DELAY; neigh->updated = jiffies; neigh_add_timer(neigh, @@@ -1320,8 -1312,7 +1312,7 @@@ int neigh_resolve_output(struct neighbo out: return rc; discard: - NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", - dst, neigh); + neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh); out_kfree_skb: rc = -EINVAL; kfree_skb(skb); @@@ -1498,7 -1489,7 +1489,7 @@@ void neigh_parms_release(struct neigh_t } } write_unlock_bh(&tbl->lock); - NEIGH_PRINTK1("neigh_parms_release: not found\n"); + neigh_dbg(1, "%s: not found\n", __func__); } EXPORT_SYMBOL(neigh_parms_release);
@@@ -1613,7 -1604,7 +1604,7 @@@ int neigh_table_clear(struct neigh_tabl } EXPORT_SYMBOL(neigh_table_clear);
- static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@@ -1677,7 -1668,7 +1668,7 @@@ out return err; }
- static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@@ -1955,7 -1946,7 +1946,7 @@@ static const struct nla_policy nl_ntbl_ [NDTPA_LOCKTIME] = { .type = NLA_U64 }, };
- static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct neigh_table *tbl; @@@ -2714,7 -2705,7 +2705,7 @@@ static int neigh_stat_seq_open(struct i
if (!ret) { struct seq_file *sf = file->private_data; - sf->private = PDE(inode)->data; + sf->private = PDE_DATA(inode); } return ret; }; diff --combined net/core/rtnetlink.c index 23854b5,589d0ab..18af08a --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@@ -517,32 -517,6 +517,6 @@@ out return err; }
- static const int rtm_min[RTM_NR_FAMILIES] = - { - [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)), - [RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)), - [RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)), - [RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)), - [RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)), - [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)), - [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)), - [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)), - [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), - [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), - }; - - static const int rta_max[RTM_NR_FAMILIES] = - { - [RTM_FAM(RTM_NEWLINK)] = IFLA_MAX, - [RTM_FAM(RTM_NEWADDR)] = IFA_MAX, - [RTM_FAM(RTM_NEWROUTE)] = RTA_MAX, - [RTM_FAM(RTM_NEWRULE)] = FRA_MAX, - [RTM_FAM(RTM_NEWQDISC)] = TCA_MAX, - [RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX, - [RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX, - [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX, - }; - int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) { struct sock *rtnl = net->rtnl; @@@ -1072,7 -1046,7 +1046,7 @@@ static int rtnl_dump_ifinfo(struct sk_b rcu_read_lock(); cb->seq = net->dev_base_seq;
- if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK]) @@@ -1539,7 -1513,7 +1513,7 @@@ errout return err; }
- static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@@ -1580,7 -1554,7 +1554,7 @@@ errout return err; }
- static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; @@@ -1711,7 -1685,7 +1685,7 @@@ static int rtnl_group_changelink(struc return 0; }
- static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; @@@ -1866,7 -1840,7 +1840,7 @@@ out } }
- static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) + static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@@ -1922,7 -1896,7 +1896,7 @@@ static u16 rtnl_calcit(struct sk_buff * u32 ext_filter_mask = 0; u16 min_ifinfo_dump_size = 0;
- if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); @@@ -1957,8 -1931,11 +1931,11 @@@ static int rtnl_dump_all(struct sk_buf if (rtnl_msg_handlers[idx] == NULL || rtnl_msg_handlers[idx][type].dumpit == NULL) continue; - if (idx > s_idx) + if (idx > s_idx) { memset(&cb->args[0], 0, sizeof(cb->args)); + cb->prev_seq = 0; + cb->seq = 0; + } if (rtnl_msg_handlers[idx][type].dumpit(skb, cb)) break; } @@@ -2051,7 -2028,39 +2028,39 @@@ errout rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); }
- static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + /** + * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry + */ + int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags) + { + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; + } + EXPORT_SYMBOL(ndo_dflt_fdb_add); + + static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@@ -2082,7 -2091,7 +2091,7 @@@ }
addr = nla_data(tb[NDA_LLADDR]); - if (!is_valid_ether_addr(addr)) { + if (is_zero_ether_addr(addr)) { pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n"); return -EINVAL; } @@@ -2103,10 -2112,13 +2112,13 @@@ }
/* Embedded bridge, macvlan, and any other device support */ - if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { - err = dev->netdev_ops->ndo_fdb_add(ndm, tb, - dev, addr, - nlh->nlmsg_flags); + if ((ndm->ndm_flags & NTF_SELF)) { + if (dev->netdev_ops->ndo_fdb_add) + err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, + nlh->nlmsg_flags); + else + err = ndo_dflt_fdb_add(ndm, tb, dev, addr, + nlh->nlmsg_flags);
if (!err) { rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); @@@ -2117,7 -2129,36 +2129,36 @@@ out return err; }
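One subtlety in ndo_dflt_fdb_add() above: dev_uc_add_excl()/dev_mc_add_excl() report an already-present address as -EEXIST, but the helper only lets that reach user space when the request insisted on exclusivity, matching the usual netlink create semantics:

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;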
- static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + /** + * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry + */ + int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr) + { + int err = -EOPNOTSUPP; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state & NUD_PERMANENT) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + + return err; + } + EXPORT_SYMBOL(ndo_dflt_fdb_del); + + static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@@ -2174,8 -2215,11 +2215,11 @@@ }
/* Embedded bridge, macvlan, and any other device support */ - if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { - err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + if (ndm->ndm_flags & NTF_SELF) { + if (dev->netdev_ops->ndo_fdb_del) + err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + else + err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
if (!err) { rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); @@@ -2220,7 -2264,7 +2264,7 @@@ skip * @dev: netdevice * * Default netdevice operation to dump the existing unicast address list. - * Returns zero on success. + * Returns number of addresses from list put in skb. */ int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, @@@ -2260,6 -2304,8 +2304,8 @@@ static int rtnl_fdb_dump(struct sk_buf
if (dev->netdev_ops->ndo_fdb_dump) idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); + else + idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); } rcu_read_unlock();
@@@ -2411,8 -2457,7 +2457,7 @@@ errout return err; }
- static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, - void *arg) + static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@@ -2482,8 -2527,7 +2527,7 @@@ out return err; }
- static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, - void *arg) + static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@@ -2553,10 -2597,6 +2597,6 @@@ out return err; }
- /* Protected by RTNL sempahore. */ - static struct rtattr **rta_buf; - static int rtattr_max; - /* Process one rtnetlink message. */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) @@@ -2564,7 -2604,6 +2604,6 @@@ struct net *net = sock_net(skb->sk); rtnl_doit_func doit; int sz_idx, kind; - int min_len; int family; int type; int err; @@@ -2576,10 -2615,10 +2615,10 @@@ type -= RTM_BASE;
/* All the messages must have at least 1 byte length */ - if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) + if (nlmsg_len(nlh) < sizeof(struct rtgenmsg)) return 0;
- family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; + family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; sz_idx = type>>2; kind = type&3;
@@@ -2612,32 -2651,11 +2651,11 @@@ return err; }
- memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); - - min_len = rtm_min[sz_idx]; - if (nlh->nlmsg_len < min_len) - return -EINVAL; - - if (nlh->nlmsg_len > min_len) { - int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); - struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); - - while (RTA_OK(attr, attrlen)) { - unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; - if (flavor) { - if (flavor > rta_max[sz_idx]) - return -EINVAL; - rta_buf[flavor-1] = attr; - } - attr = RTA_NEXT(attr, attrlen); - } - } - doit = rtnl_get_doit(family, type); if (doit == NULL) return -EOPNOTSUPP;
- return doit(skb, nlh, (void *)&rta_buf[0]); + return doit(skb, nlh); }
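With rta_buf gone, rtnetlink_rcv_msg() no longer pre-parses attributes into a shared rtattr array sized by rtattr_max; each doit handler parses its own attributes (typically via nlmsg_parse(), as in the devinet.c handlers below), which also removes a piece of RTNL-protected global state. What remains here is the type decode: subtract RTM_BASE, then the upper bits select the per-family table slot and the low two bits select NEW/DEL/GET/SET. A self-contained illustration, using the standard RTM_* values from include/uapi/linux/rtnetlink.h:

/* Sketch: how rtnetlink_rcv_msg() splits nlmsg_type into an index
 * and a kind (0=NEW, 1=DEL, 2=GET, 3=SET).
 */
#include <stdio.h>

#define RTM_BASE     16
#define RTM_NEWLINK  16
#define RTM_GETLINK  18
#define RTM_NEWADDR  20
#define RTM_GETADDR  22

static void decode(const char *name, int type)
{
    type -= RTM_BASE;
    printf("%-12s sz_idx=%d kind=%d\n", name, type >> 2, type & 3);
}

int main(void)
{
    decode("RTM_NEWLINK", RTM_NEWLINK);  /* sz_idx=0 kind=0 (NEW) */
    decode("RTM_GETLINK", RTM_GETLINK);  /* sz_idx=0 kind=2 (GET) */
    decode("RTM_NEWADDR", RTM_NEWADDR);  /* sz_idx=1 kind=0 */
    decode("RTM_GETADDR", RTM_GETADDR);  /* sz_idx=1 kind=2 */
    return 0;
}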
static void rtnetlink_rcv(struct sk_buff *skb) @@@ -2707,16 -2725,6 +2725,6 @@@ static struct pernet_operations rtnetli
void __init rtnetlink_init(void) { - int i; - - rtattr_max = 0; - for (i = 0; i < ARRAY_SIZE(rta_max); i++) - if (rta_max[i] > rtattr_max) - rtattr_max = rta_max[i]; - rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL); - if (!rta_buf) - panic("rtnetlink_init: cannot allocate rta_buf\n"); - if (register_pernet_subsys(&rtnetlink_net_ops)) panic("rtnetlink_init: cannot initialize rtnetlink\n");
diff --combined net/ipv4/devinet.c index c6287cd,2759dfd..dfc39d4 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@@ -536,7 -536,7 +536,7 @@@ struct in_ifaddr *inet_ifa_byprefix(str return NULL; }
- static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; @@@ -587,16 -587,13 +587,16 @@@ static void check_lifetime(struct work_ { unsigned long now, next, next_sec, next_sched; struct in_ifaddr *ifa; + struct hlist_node *n; int i;
now = jiffies; next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
- rcu_read_lock(); for (i = 0; i < IN4_ADDR_HSIZE; i++) { + bool change_needed = false; + + rcu_read_lock(); hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { unsigned long age;
@@@ -609,7 -606,16 +609,7 @@@
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && age >= ifa->ifa_valid_lft) { - struct in_ifaddr **ifap ; - - rtnl_lock(); - for (ifap = &ifa->ifa_dev->ifa_list; - *ifap != NULL; ifap = &ifa->ifa_next) { - if (*ifap == ifa) - inet_del_ifa(ifa->ifa_dev, - ifap, 1); - } - rtnl_unlock(); + change_needed = true; } else if (ifa->ifa_preferred_lft == INFINITY_LIFE_TIME) { continue; @@@ -619,8 -625,10 +619,8 @@@ next = ifa->ifa_tstamp + ifa->ifa_valid_lft * HZ;
- if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { - ifa->ifa_flags |= IFA_F_DEPRECATED; - rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); - } + if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) + change_needed = true; } else if (time_before(ifa->ifa_tstamp + ifa->ifa_preferred_lft * HZ, next)) { @@@ -628,42 -636,8 +628,42 @@@ ifa->ifa_preferred_lft * HZ; } } + rcu_read_unlock(); + if (!change_needed) + continue; + rtnl_lock(); + hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { + unsigned long age; + + if (ifa->ifa_flags & IFA_F_PERMANENT) + continue; + + /* We try to batch several events at once. */ + age = (now - ifa->ifa_tstamp + + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; + + if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && + age >= ifa->ifa_valid_lft) { + struct in_ifaddr **ifap; + + for (ifap = &ifa->ifa_dev->ifa_list; + *ifap != NULL; ifap = &(*ifap)->ifa_next) { + if (*ifap == ifa) { + inet_del_ifa(ifa->ifa_dev, + ifap, 1); + break; + } + } + } else if (ifa->ifa_preferred_lft != + INFINITY_LIFE_TIME && + age >= ifa->ifa_preferred_lft && + !(ifa->ifa_flags & IFA_F_DEPRECATED)) { + ifa->ifa_flags |= IFA_F_DEPRECATED; + rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); + } + } + rtnl_unlock(); } - rcu_read_unlock();
next_sec = round_jiffies_up(next); next_sched = next; @@@ -801,7 -775,7 +801,7 @@@ static struct in_ifaddr *find_matching_ return NULL; }
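The check_lifetime() rework above addresses a locking problem visible in the removed lines: the old code called rtnl_lock(), which may sleep, from inside an RCU read-side section. The new shape is two-phase per hash bucket: a cheap rcu_read_lock() scan that only records whether anything needs changing, then, only if so, an RTNL-protected re-walk with hlist_for_each_entry_safe() that re-checks the lifetimes before deleting or deprecating entries. A compact pthreads model of that scan-then-modify pattern (the list, lock, and field names are illustrative, not kernel API):

/* Sketch: two-phase "scan read-locked, modify write-locked" pattern
 * used by the reworked check_lifetime(). Names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry { int expired; struct entry *next; };

static struct entry c = { 1, NULL }, b = { 0, &c }, a = { 0, &b };
static struct entry *bucket = &a;
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

int main(void)
{
    struct entry **pp, *e;
    bool change_needed = false;

    /* Phase 1: cheap read-side scan, no modification. */
    pthread_rwlock_rdlock(&lock);
    for (e = bucket; e; e = e->next)
        if (e->expired)
            change_needed = true;
    pthread_rwlock_unlock(&lock);

    if (!change_needed)
        return 0;

    /* Phase 2: re-walk under the write lock; conditions must be
     * re-checked because the list may have changed in between. */
    pthread_rwlock_wrlock(&lock);
    for (pp = &bucket; (e = *pp) != NULL; )
        if (e->expired)
            *pp = e->next;     /* unlink */
        else
            pp = &e->next;
    pthread_rwlock_unlock(&lock);

    for (e = bucket; e; e = e->next)
        printf("kept entry, expired=%d\n", e->expired);
    return 0;
}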
- static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct in_ifaddr *ifa; @@@ -830,8 -804,6 +830,8 @@@ return -EEXIST; ifa = ifa_existing; set_ifa_lifetime(ifa, valid_lft, prefered_lft); + cancel_delayed_work(&check_lifetime_work); + schedule_delayed_work(&check_lifetime_work, 0); rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); } @@@ -1529,6 -1501,8 +1529,8 @@@ static int inet_dump_ifaddr(struct sk_b idx = 0; head = &net->dev_index_head[h]; rcu_read_lock(); + cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^ + net->dev_base_seq; hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@@ -1549,6 -1523,7 +1551,7 @@@ rcu_read_unlock(); goto done; } + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); } cont: idx++; @@@ -1760,8 -1735,7 +1763,7 @@@ static const struct nla_policy devconf_ };
static int inet_netconf_get_devconf(struct sk_buff *in_skb, - struct nlmsghdr *nlh, - void *arg) + struct nlmsghdr *nlh) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX+1]; @@@ -1821,6 -1795,77 +1823,77 @@@ errout return err; }
+ static int inet_netconf_dump_devconf(struct sk_buff *skb, + struct netlink_callback *cb) + { + struct net *net = sock_net(skb->sk); + int h, s_h; + int idx, s_idx; + struct net_device *dev; + struct in_device *in_dev; + struct hlist_head *head; + + s_h = cb->args[0]; + s_idx = idx = cb->args[1]; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); + cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^ + net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + in_dev = __in_dev_get_rcu(dev); + if (!in_dev) + goto cont; + + if (inet_netconf_fill_devconf(skb, dev->ifindex, + &in_dev->cnf, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, + NLM_F_MULTI, + -1) <= 0) { + rcu_read_unlock(); + goto done; + } + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + cont: + idx++; + } + rcu_read_unlock(); + } + if (h == NETDEV_HASHENTRIES) { + if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, + net->ipv4.devconf_all, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } + if (h == NETDEV_HASHENTRIES + 1) { + if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, + net->ipv4.devconf_dflt, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } + done: + cb->args[0] = h; + cb->args[1] = idx; + + return skb->len; + } + #ifdef CONFIG_SYSCTL
static void devinet_copy_dflt_conf(struct net *net, int i) @@@ -2225,6 -2270,6 +2298,6 @@@ void __init devinet_init(void rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, - NULL, NULL); + inet_netconf_dump_devconf, NULL); }
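inet_netconf_dump_devconf(), now registered as the RTM_GETNETCONF dump handler above, follows the standard netlink dump shape: progress is parked in cb->args[] between skb-sized chunks (args[0] is the dev_index hash bucket, args[1] the offset inside it), and two pseudo-buckets past NETDEV_HASHENTRIES emit the "all" and "default" devconf records. Seeding cb->seq from the address generation id lets nl_dump_check_consistent() flag dumps that raced with configuration changes. A toy model of the resumable iteration, with a fixed per-call budget standing in for the skb filling up:

/* Sketch: netlink-style resumable dump. args[0]/args[1] model
 * cb->args[]; BUDGET models the dump skb running out of room.
 */
#include <stdio.h>

#define NBUCKETS 3
#define BUDGET   2

static const int bucket_len[NBUCKETS] = { 2, 0, 3 };

static int dump_chunk(long args[2])
{
    int emitted = 0, h, idx;

    for (h = args[0]; h < NBUCKETS; h++, args[1] = 0) {
        for (idx = args[1]; idx < bucket_len[h]; idx++) {
            if (emitted == BUDGET) {
                args[0] = h;
                args[1] = idx;    /* resume point */
                return emitted;
            }
            printf("entry %d.%d\n", h, idx);
            emitted++;
        }
    }
    args[0] = NBUCKETS;
    return emitted;
}

int main(void)
{
    long args[2] = { 0, 0 };

    while (dump_chunk(args) == BUDGET)
        printf("-- chunk boundary --\n");
    return 0;
}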
diff --combined net/ipv4/ip_fragment.c index 52c273e,9385206..b66910a --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@@ -79,40 -79,11 +79,11 @@@ struct ipq struct inet_peer *peer; };
- /* RFC 3168 support : - * We want to check ECN values of all fragments, do detect invalid combinations. - * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value. - */ - #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */ - #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */ - #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */ - #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */ - static inline u8 ip4_frag_ecn(u8 tos) { return 1 << (tos & INET_ECN_MASK); }
- /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements - * Value : 0xff if frame should be dropped. - * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field - */ - static const u8 ip4_frag_ecn_table[16] = { - /* at least one fragment had CE, and others ECT_0 or ECT_1 */ - [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE, - [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE, - [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE, - - /* invalid combinations : drop frame */ - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff, - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff, - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff, - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff, - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff, - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff, - [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff, - }; - static struct inet_frags ip4_frags;
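The RFC 3168 machinery deleted above is moved, not dropped: the IPFRAG_ECN_* bits and the table are generalized out of ip_fragment.c (the reassembly hunk below now reads the shared ip_frag_ecn_table), evidently so IPv6 reassembly can reuse them. The rule the table encodes: OR together one bit per fragment's ECN codepoint; if Not-ECT was mixed with ECT or CE the combination is invalid and the datagram is dropped (0xff), otherwise a CE mark on any fragment propagates to the reassembled header. A self-contained check of a few combinations, with the table exactly as it stood in ip_fragment.c:

/* Sketch: the RFC 3168 sec 5.3 fragment-ECN rule, table verbatim
 * from the removed ip_fragment.c code. INET_ECN_CE is codepoint 3.
 */
#include <stdio.h>

#define IPFRAG_ECN_NOT_ECT 0x01
#define IPFRAG_ECN_ECT_1   0x02
#define IPFRAG_ECN_ECT_0   0x04
#define IPFRAG_ECN_CE      0x08
#define INET_ECN_CE        3
#define DROP               0xff

static const unsigned char ecn_table[16] = {
    [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
    [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
    [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = DROP,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = DROP,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = DROP,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = DROP,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = DROP,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = DROP,
    [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 |
     IPFRAG_ECN_ECT_1] = DROP,
};

int main(void)
{
    /* All fragments ECT(0): nothing to fix up. */
    printf("0x%02x\n", ecn_table[IPFRAG_ECN_ECT_0]);                      /* 0x00 */
    /* One CE among ECT(0) fragments: mark the result CE. */
    printf("0x%02x\n", ecn_table[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]);      /* 0x03 */
    /* Not-ECT mixed with ECT: invalid combination, drop. */
    printf("0x%02x\n", ecn_table[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0]); /* 0xff */
    return 0;
}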
int ip_frag_nqueues(struct net *net) @@@ -248,7 -219,8 +219,7 @@@ static void ip_expire(unsigned long arg if (!head->dev) goto out_rcu_unlock;
- /* skb dst is stale, drop it, and perform route lookup again */ - skb_dst_drop(head); + /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); @@@ -522,16 -494,9 +493,16 @@@ found qp->q.max_size = skb->len + ihl;
if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - qp->q.meat == qp->q.len) - return ip_frag_reasm(qp, prev, dev); + qp->q.meat == qp->q.len) { + unsigned long orefdst = skb->_skb_refdst;
+ skb->_skb_refdst = 0UL; + err = ip_frag_reasm(qp, prev, dev); + skb->_skb_refdst = orefdst; + return err; + } + + skb_dst_drop(skb); inet_frag_lru_move(&qp->q); return -EINPROGRESS;
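The ip_frag_queue() change above adjusts dst handling in two ways: fragments parked on the queue now drop their dst before returning -EINPROGRESS (matching the new ip_expire() comment earlier, which re-runs the route lookup on expiry), and when the last fragment completes the datagram, the caller's refdst is stashed in a local, cleared across ip_frag_reasm(), and restored afterwards so the skb the caller continues to process keeps its reference. A generic sketch of that save/clear/restore idiom; the struct and frag_reasm() are illustrative stand-ins, not kernel API:

/* Sketch: the save/clear/restore idiom used around ip_frag_reasm().
 * "refdst" is just an unsigned long field, as in struct sk_buff.
 */
#include <stdio.h>

struct pkt { unsigned long refdst; int len; };

static int frag_reasm(struct pkt *head)
{
    /* The reassembly path must see a clean slate here. */
    printf("reasm sees refdst=%lu\n", head->refdst);
    return 0;
}

int main(void)
{
    struct pkt skb = { .refdst = 0xdead0001UL, .len = 1500 };
    unsigned long orefdst = skb.refdst;   /* stash caller's reference */
    int err;

    skb.refdst = 0UL;
    err = frag_reasm(&skb);
    skb.refdst = orefdst;                 /* hand the reference back */

    printf("err=%d refdst restored=%#lx\n", err, skb.refdst);
    return 0;
}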
@@@ -557,7 -522,7 +528,7 @@@ static int ip_frag_reasm(struct ipq *qp
ipq_kill(qp);
- ecn = ip4_frag_ecn_table[qp->ecn]; + ecn = ip_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { err = -EINVAL; goto out_fail; diff --combined net/ipv4/syncookies.c index 397e0f6,7f4a5cb..b05c96e --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@@ -267,7 -267,6 +267,6 @@@ struct sock *cookie_v4_check(struct soc struct ip_options *opt) { struct tcp_options_received tcp_opt; - const u8 *hash_location; struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); @@@ -294,7 -293,7 +293,7 @@@
/* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); - tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); + tcp_parse_options(skb, &tcp_opt, 0, NULL);
if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) goto out; @@@ -349,8 -348,8 +348,8 @@@ * hasn't changed since we received the original syn, but I see * no easy way to do this. */ - flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), - RT_SCOPE_UNIVERSE, IPPROTO_TCP, + flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, + RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, inet_sk_flowi_flags(sk), (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, ireq->loc_addr, th->source, th->dest); diff --combined net/ipv4/tcp_input.c index 13b9c08,6d9ca35..aafd052 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -93,12 -93,11 +93,11 @@@ int sysctl_tcp_stdurg __read_mostly int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; - int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_thin_dupack __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; - int sysctl_tcp_early_retrans __read_mostly = 2; + int sysctl_tcp_early_retrans __read_mostly = 3;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ @@@ -108,18 -107,15 +107,16 @@@ #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ - #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ + #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ - #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ +#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) - #define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) @@@ -1160,10 -1156,8 +1157,8 @@@ static u8 tcp_sacktag_one(struct sock * tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); - - /* SACK enhanced F-RTO (RFC4138; Appendix B) */ - if (!after(end_seq, tp->frto_highmark)) - state->flag |= FLAG_ONLY_ORIG_SACKED; + if (!after(end_seq, tp->high_seq)) + state->flag |= FLAG_ORIG_SACK_ACKED; }
if (sacked & TCPCB_LOST) { @@@ -1556,7 -1550,6 +1551,6 @@@ static in tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una) { - const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); @@@ -1729,12 -1722,6 +1723,6 @@@ walk start_seq, end_seq, dup_sack);
advance_sp: - /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct - * due to in-order walk - */ - if (after(end_seq, tp->frto_highmark)) - state.flag &= ~FLAG_ONLY_ORIG_SACKED; - i++; }
@@@ -1751,8 -1738,7 +1739,7 @@@ tcp_verify_left_out(tp);
if ((state.reord < tp->fackets_out) && - ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && - (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) + ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
out: @@@ -1826,197 -1812,6 +1813,6 @@@ static inline void tcp_reset_reno_sack( tp->sacked_out = 0; }
- static int tcp_is_sackfrto(const struct tcp_sock *tp) - { - return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); - } - - /* F-RTO can only be used if TCP has never retransmitted anything other than - * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) - */ - bool tcp_use_frto(struct sock *sk) - { - const struct tcp_sock *tp = tcp_sk(sk); - const struct inet_connection_sock *icsk = inet_csk(sk); - struct sk_buff *skb; - - if (!sysctl_tcp_frto) - return false; - - /* MTU probe and F-RTO won't really play nicely along currently */ - if (icsk->icsk_mtup.probe_size) - return false; - - if (tcp_is_sackfrto(tp)) - return true; - - /* Avoid expensive walking of rexmit queue if possible */ - if (tp->retrans_out > 1) - return false; - - skb = tcp_write_queue_head(sk); - if (tcp_skb_is_last(sk, skb)) - return true; - skb = tcp_write_queue_next(sk, skb); /* Skips head */ - tcp_for_write_queue_from(skb, sk) { - if (skb == tcp_send_head(sk)) - break; - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - return false; - /* Short-circuit when first non-SACKed skb has been checked */ - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) - break; - } - return true; - } - - /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO - * recovery a bit and use heuristics in tcp_process_frto() to detect if - * the RTO was spurious. Only clear SACKED_RETRANS of the head here to - * keep retrans_out counting accurate (with SACK F-RTO, other than head - * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS - * bits are handled if the Loss state is really to be entered (in - * tcp_enter_frto_loss). - * - * Do like tcp_enter_loss() would; when RTO expires the second time it - * does: - * "Reduce ssthresh if it has not yet been made inside this window." - */ - void tcp_enter_frto(struct sock *sk) - { - const struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || - tp->snd_una == tp->high_seq || - ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && - !icsk->icsk_retransmits)) { - tp->prior_ssthresh = tcp_current_ssthresh(sk); - /* Our state is too optimistic in ssthresh() call because cwnd - * is not reduced until tcp_enter_frto_loss() when previous F-RTO - * recovery has not yet completed. Pattern would be this: RTO, - * Cumulative ACK, RTO (2xRTO for the same segment does not end - * up here twice). - * RFC4138 should be more specific on what to do, even though - * RTO is quite unlikely to occur after the first Cumulative ACK - * due to back-off and complexity of triggering events ... - */ - if (tp->frto_counter) { - u32 stored_cwnd; - stored_cwnd = tp->snd_cwnd; - tp->snd_cwnd = 2; - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - tp->snd_cwnd = stored_cwnd; - } else { - tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); - } - /* ... in theory, cong.control module could do "any tricks" in - * ssthresh(), which means that ca_state, lost bits and lost_out - * counter would have to be faked before the call occurs. 
We - * consider that too expensive, unlikely and hacky, so modules - * using these in ssthresh() must deal these incompatibility - * issues if they receives CA_EVENT_FRTO and frto_counter != 0 - */ - tcp_ca_event(sk, CA_EVENT_FRTO); - } - - tp->undo_marker = tp->snd_una; - tp->undo_retrans = 0; - - skb = tcp_write_queue_head(sk); - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - tp->undo_marker = 0; - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - tp->retrans_out -= tcp_skb_pcount(skb); - } - tcp_verify_left_out(tp); - - /* Too bad if TCP was application limited */ - tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); - - /* Earlier loss recovery underway (see RFC4138; Appendix B). - * The last condition is necessary at least in tp->frto_counter case. - */ - if (tcp_is_sackfrto(tp) && (tp->frto_counter || - ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && - after(tp->high_seq, tp->snd_una)) { - tp->frto_highmark = tp->high_seq; - } else { - tp->frto_highmark = tp->snd_nxt; - } - tcp_set_ca_state(sk, TCP_CA_Disorder); - tp->high_seq = tp->snd_nxt; - tp->frto_counter = 1; - } - - /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, - * which indicates that we should follow the traditional RTO recovery, - * i.e. mark everything lost and do go-back-N retransmission. - */ - static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) - { - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - tp->lost_out = 0; - tp->retrans_out = 0; - if (tcp_is_reno(tp)) - tcp_reset_reno_sack(tp); - - tcp_for_write_queue(skb, sk) { - if (skb == tcp_send_head(sk)) - break; - - TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; - /* - * Count the retransmission made on RTO correctly (only when - * waiting for the first ACK and did not get it)... - */ - if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) { - /* For some reason this R-bit might get cleared? */ - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) - tp->retrans_out += tcp_skb_pcount(skb); - /* ...enter this if branch just for the first segment */ - flag |= FLAG_DATA_ACKED; - } else { - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) - tp->undo_marker = 0; - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - } - - /* Marking forward transmissions that were made after RTO lost - * can cause unnecessary retransmissions in some scenarios, - * SACK blocks will mitigate that in some but not in all cases. - * We used to not mark them but it was causing break-ups with - * receivers that do only in-order receival. - * - * TODO: we could detect presence of such receiver and select - * different behavior per flow. 
- */ - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { - TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; - tp->lost_out += tcp_skb_pcount(skb); - tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; - } - } - tcp_verify_left_out(tp); - - tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; - tp->snd_cwnd_cnt = 0; - tp->snd_cwnd_stamp = tcp_time_stamp; - tp->frto_counter = 0; - - tp->reordering = min_t(unsigned int, tp->reordering, - sysctl_tcp_reordering); - tcp_set_ca_state(sk, TCP_CA_Loss); - tp->high_seq = tp->snd_nxt; - TCP_ECN_queue_cwr(tp); - - tcp_clear_all_retrans_hints(tp); - } - static void tcp_clear_retrans_partial(struct tcp_sock *tp) { tp->retrans_out = 0; @@@ -2043,10 -1838,13 +1839,13 @@@ void tcp_enter_loss(struct sock *sk, in const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; + bool new_recovery = false;
/* Reduce ssthresh if it has not yet been made inside this window. */ - if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || + if (icsk->icsk_ca_state <= TCP_CA_Disorder || + !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { + new_recovery = true; tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); @@@ -2088,8 -1886,14 +1887,14 @@@ tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; TCP_ECN_queue_cwr(tp); - /* Abort F-RTO algorithm if one is in progress */ - tp->frto_counter = 0; + + /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous + * loss recovery is underway except recurring timeout(s) on + * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing + */ + tp->frto = sysctl_tcp_frto && + (new_recovery || icsk->icsk_retransmits) && + !inet_csk(sk)->icsk_mtup.probe_size; }
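tcp_enter_loss() now arms the simpler RFC 5682 F-RTO logic with a single flag instead of the old frto_counter state machine: F-RTO is attempted only if the sysctl enables it, this RTO either starts a new recovery episode or is a recurring timeout on the same SND.UNA, and no MTU probe is outstanding. The predicate, pulled out as a standalone function for readability (a model, not kernel code; the parameters mirror the fields tested above):

/* Sketch: the tp->frto arming rule from tcp_enter_loss(). */
#include <stdbool.h>
#include <stdio.h>

static bool frto_armed(int sysctl_tcp_frto, bool new_recovery,
                       int icsk_retransmits, int mtup_probe_size)
{
    return sysctl_tcp_frto &&
           (new_recovery || icsk_retransmits) &&
           !mtup_probe_size;
}

int main(void)
{
    /* Fresh RTO, no MTU probing: F-RTO runs. */
    printf("%d\n", frto_armed(2, true, 0, 0));   /* 1 */
    /* RTO inside an existing recovery, first timeout: no F-RTO... */
    printf("%d\n", frto_armed(2, false, 0, 0));  /* 0 */
    /* ...but a recurring timeout on the same SND.UNA re-arms it. */
    printf("%d\n", frto_armed(2, false, 1, 0));  /* 1 */
    /* An outstanding MTU probe disables F-RTO entirely. */
    printf("%d\n", frto_armed(2, true, 0, 512)); /* 0 */
    return 0;
}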
/* If ACK arrived pointing to a remembered SACK, it means that our @@@ -2148,15 -1952,16 +1953,16 @@@ static bool tcp_pause_early_retransmit( * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples * available, or RTO is scheduled to fire first. */ - if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt) + if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || + (flag & FLAG_ECE) || !tp->srtt) return false;
delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) return false;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); - tp->early_retrans_delayed = 1; + inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, + TCP_RTO_MAX); return true; }
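The delay above is RTT/4 with a 2 ms floor: tp->srtt stores eight times the smoothed RTT, so srtt >> 5 equals srtt/32, i.e. RTT/4. Note also that a delayed early retransmit now gets its own ICSK_TIME_EARLY_RETRANS timer slot rather than reusing ICSK_TIME_RETRANS plus the removed early_retrans_delayed flag. Worked numbers, assuming HZ=1000 so one jiffy is one millisecond:

/* Sketch: the early-retransmit pause from tcp_pause_early_retransmit(),
 * assuming HZ=1000 (1 jiffy == 1 ms) for readability.
 */
#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
    return a > b ? a : b;
}

int main(void)
{
    /* tp->srtt is 8 * smoothed RTT in jiffies. */
    unsigned long srtt_40ms = 8 * 40;
    unsigned long srtt_4ms  = 8 * 4;

    /* delay = max(RTT/4, 2 ms): srtt >> 5 == (8*RTT)/32 == RTT/4 */
    printf("RTT 40ms -> delay %lu ms\n", max_ul(srtt_40ms >> 5, 2)); /* 10 */
    printf("RTT  4ms -> delay %lu ms\n", max_ul(srtt_4ms >> 5, 2));  /* 2  */
    return 0;
}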
@@@ -2272,10 -2077,6 +2078,6 @@@ static bool tcp_time_to_recover(struct struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out;
- /* Do not perform any recovery during F-RTO algorithm */ - if (tp->frto_counter) - return false; - /* Trick#1: The loss is proven. */ if (tp->lost_out) return true; @@@ -2319,7 -2120,7 +2121,7 @@@ * interval if appropriate. */ if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && - (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && + (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && !tcp_may_send_now(sk)) return !tcp_pause_early_retransmit(sk, flag);
@@@ -2636,12 -2437,12 +2438,12 @@@ static int tcp_try_undo_partial(struct return failed; }
- /* Undo during loss recovery after partial ACK. */ - static bool tcp_try_undo_loss(struct sock *sk) + /* Undo during loss recovery after partial ACK or using F-RTO. */ + static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { struct tcp_sock *tp = tcp_sk(sk);
- if (tcp_may_undo(tp)) { + if (frto_undo || tcp_may_undo(tp)) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) @@@ -2655,9 -2456,12 +2457,12 @@@ tp->lost_out = 0; tcp_undo_cwr(sk, true); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); + if (frto_undo) + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; tp->undo_marker = 0; - if (tcp_is_sack(tp)) + if (frto_undo || tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return true; } @@@ -2679,6 -2483,7 +2484,7 @@@ static void tcp_init_cwnd_reduction(str struct tcp_sock *tp = tcp_sk(sk);
tp->high_seq = tp->snd_nxt; + tp->tlp_high_seq = 0; tp->snd_cwnd_cnt = 0; tp->prior_cwnd = tp->snd_cwnd; tp->prr_delivered = 0; @@@ -2756,7 -2561,7 +2562,7 @@@ static void tcp_try_to_open(struct soc
tcp_verify_left_out(tp);
- if (!tp->frto_counter && !tcp_any_retrans_done(sk)) + if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0;
if (flag & FLAG_ECE) @@@ -2873,6 -2678,58 +2679,58 @@@ static void tcp_enter_recovery(struct s tcp_set_ca_state(sk, TCP_CA_Recovery); }
+ /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are + * recovered or spurious. Otherwise retransmits more on partial ACKs. + */ + static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) + { + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + bool recovered = !before(tp->snd_una, tp->high_seq); + + if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ + if (flag & FLAG_ORIG_SACK_ACKED) { + /* Step 3.b. A timeout is spurious if not all data are + * lost, i.e., never-retransmitted data are (s)acked. + */ + tcp_try_undo_loss(sk, true); + return; + } + if (after(tp->snd_nxt, tp->high_seq) && + (flag & FLAG_DATA_SACKED || is_dupack)) { + tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ + } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { + tp->high_seq = tp->snd_nxt; + __tcp_push_pending_frames(sk, tcp_current_mss(sk), + TCP_NAGLE_OFF); + if (after(tp->snd_nxt, tp->high_seq)) + return; /* Step 2.b */ + tp->frto = 0; + } + } + + if (recovered) { + /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ + icsk->icsk_retransmits = 0; + tcp_try_undo_recovery(sk); + return; + } + if (flag & FLAG_DATA_ACKED) + icsk->icsk_retransmits = 0; + if (tcp_is_reno(tp)) { + /* A Reno DUPACK means new data in F-RTO step 2.b above are + * delivered. Lower inflight to clock out (re)tranmissions. + */ + if (after(tp->snd_nxt, tp->high_seq) && is_dupack) + tcp_add_reno_sack(sk); + else if (flag & FLAG_SND_UNA_ADVANCED) + tcp_reset_reno_sack(tp); + } + if (tcp_try_undo_loss(sk, false)) + return; + tcp_xmit_retransmit_queue(sk); + } + /* Process an event, which can update packets-in-flight not trivially. * Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and @@@ -2919,12 -2776,6 +2777,6 @@@ static void tcp_fastretrans_alert(struc tp->retrans_stamp = 0; } else if (!before(tp->snd_una, tp->high_seq)) { switch (icsk->icsk_ca_state) { - case TCP_CA_Loss: - icsk->icsk_retransmits = 0; - if (tcp_try_undo_recovery(sk)) - return; - break; - case TCP_CA_CWR: /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ @@@ -2955,18 -2806,10 +2807,10 @@@ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: - if (flag & FLAG_DATA_ACKED) - icsk->icsk_retransmits = 0; - if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED) - tcp_reset_reno_sack(tp); - if (!tcp_try_undo_loss(sk)) { - tcp_moderate_cwnd(tp); - tcp_xmit_retransmit_queue(sk); - return; - } + tcp_process_loss(sk, flag, is_dupack); if (icsk->icsk_ca_state != TCP_CA_Open) return; - /* Loss is undone; fall through to processing in Open state. */ + /* Fall through to processing in Open state. */ default: if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) @@@ -3079,6 -2922,7 +2923,7 @@@ static void tcp_cong_avoid(struct sock */ void tcp_rearm_rto(struct sock *sk) { + const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk);
/* If the retrans timer is currently being used by Fast Open @@@ -3092,12 -2936,13 +2937,13 @@@ } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ - if (tp->early_retrans_delayed) { + if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked - * when the delayed ER timer fires and is rescheduled. + * when the retrans timer fires and is rescheduled. */ if (delta > 0) rto = delta; @@@ -3105,7 -2950,6 +2951,6 @@@ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); } - tp->early_retrans_delayed = 0; }
/* This function is called when the delayed ER timer fires. TCP enters @@@ -3193,8 -3037,6 +3038,6 @@@ static int tcp_clean_rtx_queue(struct s flag |= FLAG_RETRANS_DATA_ACKED; ca_seq_rtt = -1; seq_rtt = -1; - if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) - flag |= FLAG_NONHEAD_RETRANS_ACKED; } else { ca_seq_rtt = now - scb->when; last_ackt = skb->tstamp; @@@ -3203,6 -3045,8 +3046,8 @@@ } if (!(sacked & TCPCB_SACKED_ACKED)) reord = min(pkts_acked, reord); + if (!after(scb->end_seq, tp->high_seq)) + flag |= FLAG_ORIG_SACK_ACKED; }
if (sacked & TCPCB_SACKED_ACKED) @@@ -3403,150 -3247,6 +3248,6 @@@ static int tcp_ack_update_window(struc return flag; }
- /* A very conservative spurious RTO response algorithm: reduce cwnd and - * continue in congestion avoidance. - */ - static void tcp_conservative_spur_to_response(struct tcp_sock *tp) - { - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - tp->snd_cwnd_cnt = 0; - TCP_ECN_queue_cwr(tp); - tcp_moderate_cwnd(tp); - } - - /* A conservative spurious RTO response algorithm: reduce cwnd using - * PRR and continue in congestion avoidance. - */ - static void tcp_cwr_spur_to_response(struct sock *sk) - { - tcp_enter_cwr(sk, 0); - } - - static void tcp_undo_spur_to_response(struct sock *sk, int flag) - { - if (flag & FLAG_ECE) - tcp_cwr_spur_to_response(sk); - else - tcp_undo_cwr(sk, true); - } - - /* F-RTO spurious RTO detection algorithm (RFC4138) - * - * F-RTO affects during two new ACKs following RTO (well, almost, see inline - * comments). State (ACK number) is kept in frto_counter. When ACK advances - * window (but not to or beyond highest sequence sent before RTO): - * On First ACK, send two new segments out. - * On Second ACK, RTO was likely spurious. Do spurious response (response - * algorithm is not part of the F-RTO detection algorithm - * given in RFC4138 but can be selected separately). - * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss - * and TCP falls back to conventional RTO recovery. F-RTO allows overriding - * of Nagle, this is done using frto_counter states 2 and 3, when a new data - * segment of any size sent during F-RTO, state 2 is upgraded to 3. - * - * Rationale: if the RTO was spurious, new ACKs should arrive from the - * original window even after we transmit two new data segments. - * - * SACK version: - * on first step, wait until first cumulative ACK arrives, then move to - * the second step. In second step, the next ACK decides. - * - * F-RTO is implemented (mainly) in four functions: - * - tcp_use_frto() is used to determine if TCP is can use F-RTO - * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is - * called when tcp_use_frto() showed green light - * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm - * - tcp_enter_frto_loss() is called if there is not enough evidence - * to prove that the RTO is indeed spurious. It transfers the control - * from F-RTO to the conventional RTO recovery - */ - static bool tcp_process_frto(struct sock *sk, int flag) - { - struct tcp_sock *tp = tcp_sk(sk); - - tcp_verify_left_out(tp); - - /* Duplicate the behavior from Loss state (fastretrans_alert) */ - if (flag & FLAG_DATA_ACKED) - inet_csk(sk)->icsk_retransmits = 0; - - if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || - ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) - tp->undo_marker = 0; - - if (!before(tp->snd_una, tp->frto_highmark)) { - tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); - return true; - } - - if (!tcp_is_sackfrto(tp)) { - /* RFC4138 shortcoming in step 2; should also have case c): - * ACK isn't duplicate nor advances window, e.g., opposite dir - * data, winupdate - */ - if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) - return true; - - if (!(flag & FLAG_DATA_ACKED)) { - tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), - flag); - return true; - } - } else { - if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { - if (!tcp_packets_in_flight(tp)) { - tcp_enter_frto_loss(sk, 2, flag); - return true; - } - - /* Prevent sending of new data. 
*/ - tp->snd_cwnd = min(tp->snd_cwnd, - tcp_packets_in_flight(tp)); - return true; - } - - if ((tp->frto_counter >= 2) && - (!(flag & FLAG_FORWARD_PROGRESS) || - ((flag & FLAG_DATA_SACKED) && - !(flag & FLAG_ONLY_ORIG_SACKED)))) { - /* RFC4138 shortcoming (see comment above) */ - if (!(flag & FLAG_FORWARD_PROGRESS) && - (flag & FLAG_NOT_DUP)) - return true; - - tcp_enter_frto_loss(sk, 3, flag); - return true; - } - } - - if (tp->frto_counter == 1) { - /* tcp_may_send_now needs to see updated state */ - tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; - tp->frto_counter = 2; - - if (!tcp_may_send_now(sk)) - tcp_enter_frto_loss(sk, 2, flag); - - return true; - } else { - switch (sysctl_tcp_frto_response) { - case 2: - tcp_undo_spur_to_response(sk, flag); - break; - case 1: - tcp_conservative_spur_to_response(tp); - break; - default: - tcp_cwr_spur_to_response(sk); - break; - } - tp->frto_counter = 0; - tp->undo_marker = 0; - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); - } - return false; - } - /* RFC 5961 7 [ACK Throttling] */ static void tcp_send_challenge_ack(struct sock *sk) { @@@ -3565,27 -3265,38 +3266,59 @@@ } }
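Everything removed in this stretch, tcp_use_frto(), tcp_enter_frto(), tcp_enter_frto_loss(), tcp_process_frto() and the three spurious-RTO responses selected by the now-deleted sysctl_tcp_frto_response, collapses into the much smaller tcp_process_loss() added earlier: after an RTO, if an incoming ACK (s)acks data that was never retransmitted, the timeout was spurious and the congestion state is undone (counted as LINUX_MIB_TCPSPURIOUSRTOS); if data sent after the RTO gets SACKed or dupacked, the loss was real and conventional recovery continues. A condensed model of that decision (the struct and enum are illustrative; the flag semantics mirror tcp_input.c):

/* Sketch: the RFC 5682 decision at the core of tcp_process_loss(). */
#include <stdbool.h>
#include <stdio.h>

enum verdict { UNDO_SPURIOUS, LOSS_IS_REAL, KEEP_PROBING };

struct loss_state {
    bool frto;                  /* tp->frto, set by tcp_enter_loss() */
    bool orig_sack_acked;       /* FLAG_ORIG_SACK_ACKED */
    bool post_rto_data_sacked;  /* FLAG_DATA_SACKED or dupack on new data */
    bool sent_past_high_seq;    /* after(tp->snd_nxt, tp->high_seq) */
};

static enum verdict process_loss(const struct loss_state *s)
{
    if (!s->frto)
        return LOSS_IS_REAL;
    /* Step 3.b: never-retransmitted data was (s)acked, so the
     * original transmissions arrived and the RTO was spurious. */
    if (s->orig_sack_acked)
        return UNDO_SPURIOUS;
    /* Step 3.a, second part: data sent after the RTO was SACKed or
     * dupacked, so something really was lost. */
    if (s->sent_past_high_seq && s->post_rto_data_sacked)
        return LOSS_IS_REAL;
    /* Step 2.b: keep probing with new data before deciding. */
    return KEEP_PROBING;
}

int main(void)
{
    struct loss_state spurious = { true, true,  false, true };
    struct loss_state real     = { true, false, true,  true };

    printf("spurious=%d real=%d\n",
           process_loss(&spurious), process_loss(&real));
    return 0;
}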
+static void tcp_store_ts_recent(struct tcp_sock *tp) +{ + tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; + tp->rx_opt.ts_recent_stamp = get_seconds(); +} + +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +{ + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { + /* PAWS bug workaround wrt. ACK frames, the PAWS discard + * extra check below makes sure this can only happen + * for pure ACK frames. -DaveM + * + * Not only, also it occurs for expired timestamps. + */ + + if (tcp_paws_check(&tp->rx_opt, 0)) + tcp_store_ts_recent(tp); + } +} + + /* This routine deals with acks during a TLP episode. + * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. + */ + static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) + { + struct tcp_sock *tp = tcp_sk(sk); + bool is_tlp_dupack = (ack == tp->tlp_high_seq) && + !(flag & (FLAG_SND_UNA_ADVANCED | + FLAG_NOT_DUP | FLAG_DATA_SACKED)); + + /* Mark the end of TLP episode on receiving TLP dupack or when + * ack is after tlp_high_seq. + */ + if (is_tlp_dupack) { + tp->tlp_high_seq = 0; + return; + } + + if (after(ack, tp->tlp_high_seq)) { + tp->tlp_high_seq = 0; + /* Don't reduce cwnd if DSACK arrives for TLP retrans. */ + if (!(flag & FLAG_DSACKING_ACK)) { + tcp_init_cwnd_reduction(sk, true); + tcp_set_ca_state(sk, TCP_CA_CWR); + tcp_end_cwnd_reduction(sk); + tcp_set_ca_state(sk, TCP_CA_Open); + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPLOSSPROBERECOVERY); + } + } + } + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { @@@ -3600,7 -3311,6 +3333,6 @@@ int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; - bool frto_cwnd = false;
/* If the ack is older than previous acks * then we can probably ignore it. @@@ -3620,7 -3330,8 +3352,8 @@@ if (after(ack, tp->snd_nxt)) goto invalid_ack;
- if (tp->early_retrans_delayed) + if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk);
if (after(ack, prior_snd_una)) @@@ -3629,12 -3340,6 +3362,12 @@@ prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp);
+ /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + if (flag & FLAG_UPDATE_TS_RECENT) + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. @@@ -3679,30 -3384,29 +3412,29 @@@
pkts_acked = prior_packets - tp->packets_out;
- if (tp->frto_counter) - frto_cwnd = tcp_process_frto(sk, flag); - /* Guarantee sacktag reordering detection against wrap-arounds */ - if (before(tp->frto_highmark, tp->snd_una)) - tp->frto_highmark = 0; - if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. */ - if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && - tcp_may_raise_cwnd(sk, flag)) + if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } else { - if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) + if (flag & FLAG_DATA_ACKED) tcp_cong_avoid(sk, ack, prior_in_flight); }
+ if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { struct dst_entry *dst = __sk_dst_get(sk); if (dst) dst_confirm(dst); } + + if (icsk->icsk_pending == ICSK_TIME_RETRANS) + tcp_schedule_loss_probe(sk); return 1;
no_queue: @@@ -3716,6 -3420,9 +3448,9 @@@ */ if (tcp_send_head(sk)) tcp_ack_probe(sk); + + if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); return 1;
invalid_ack: @@@ -3740,8 -3447,8 +3475,8 @@@ old_ack * But, this can also be called on packets in the established flow when * the fast version below fails. */ - void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, - const u8 **hvpp, int estab, + void tcp_parse_options(const struct sk_buff *skb, + struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc) { const unsigned char *ptr; @@@ -3825,31 -3532,6 +3560,6 @@@ */ break; #endif - case TCPOPT_COOKIE: - /* This option is variable length. - */ - switch (opsize) { - case TCPOLEN_COOKIE_BASE: - /* not yet implemented */ - break; - case TCPOLEN_COOKIE_PAIR: - /* not yet implemented */ - break; - case TCPOLEN_COOKIE_MIN+0: - case TCPOLEN_COOKIE_MIN+2: - case TCPOLEN_COOKIE_MIN+4: - case TCPOLEN_COOKIE_MIN+6: - case TCPOLEN_COOKIE_MAX: - /* 16-bit multiple */ - opt_rx->cookie_plus = opsize; - *hvpp = ptr; - break; - default: - /* ignore option */ - break; - } - break; - case TCPOPT_EXP: /* Fast Open option shares code 254 using a * 16 bits magic number. It's valid only in @@@ -3895,8 -3577,7 +3605,7 @@@ static bool tcp_parse_aligned_timestamp * If it is wrong it falls back on tcp_parse_options(). */ static bool tcp_fast_parse_options(const struct sk_buff *skb, - const struct tcphdr *th, - struct tcp_sock *tp, const u8 **hvpp) + const struct tcphdr *th, struct tcp_sock *tp) { /* In the spirit of fast parsing, compare doff directly to constant * values. Because equality is used, short doff can be ignored here. @@@ -3910,7 -3591,7 +3619,7 @@@ return true; }
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); + tcp_parse_options(skb, &tp->rx_opt, 1, NULL); if (tp->rx_opt.saw_tstamp) tp->rx_opt.rcv_tsecr -= tp->tsoffset;
@@@ -3955,6 -3636,27 +3664,6 @@@ const u8 *tcp_parse_md5sig_option(cons EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif
-static inline void tcp_store_ts_recent(struct tcp_sock *tp) -{ - tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = get_seconds(); -} - -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) -{ - if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { - /* PAWS bug workaround wrt. ACK frames, the PAWS discard - * extra check below makes sure this can only happen - * for pure ACK frames. -DaveM - * - * Not only, also it occurs for expired timestamps. - */ - - if (tcp_paws_check(&tp->rx_opt, 0)) - tcp_store_ts_recent(tp); - } -} - /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) @@@ -5270,12 -4972,10 +4979,10 @@@ out static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { struct tcp_sock *tp = tcp_sk(sk);
/* RFC1323: H1. Apply PAWS check first. */ - if (tcp_fast_parse_options(skb, th, tp, &hash_location) && - tp->rx_opt.saw_tstamp && + if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); @@@ -5550,9 -5250,14 +5257,9 @@@ slow_path return 0;
step5: - if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) + if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) goto discard;
- /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */ @@@ -5624,12 -5329,11 +5331,11 @@@ static bool tcp_rcv_fastopen_synack(str
if (mss == tp->rx_opt.user_mss) { struct tcp_options_received opt; - const u8 *hash_location;
/* Get original SYNACK MSS value if user MSS sets mss_clamp */ tcp_clear_options(&opt); opt.user_mss = opt.mss_clamp = 0; - tcp_parse_options(synack, &opt, &hash_location, 0, NULL); + tcp_parse_options(synack, &opt, 0, NULL); mss = opt.mss_clamp; }
@@@ -5660,14 -5364,12 +5366,12 @@@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { - const u8 *hash_location; struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc); + tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp) tp->rx_opt.rcv_tsecr -= tp->tsoffset;
@@@ -5764,30 -5466,6 +5468,6 @@@ * is initialized. */ tp->copied_seq = tp->rcv_nxt;
- if (cvp != NULL && - cvp->cookie_pair_size > 0 && - tp->rx_opt.cookie_plus > 0) { - int cookie_size = tp->rx_opt.cookie_plus - - TCPOLEN_COOKIE_BASE; - int cookie_pair_size = cookie_size - + cvp->cookie_desired; - - /* A cookie extension option was sent and returned. - * Note that each incoming SYNACK replaces the - * Responder cookie. The initial exchange is most - * fragile, as protection against spoofing relies - * entirely upon the sequence and timestamp (above). - * This replacement strategy allows the correct pair to - * pass through, while any others will be filtered via - * Responder verification later. - */ - if (sizeof(cvp->cookie_pair) >= cookie_pair_size) { - memcpy(&cvp->cookie_pair[cvp->cookie_desired], - hash_location, cookie_size); - cvp->cookie_pair_size = cookie_pair_size; - } - } - smp_mb();
tcp_finish_connect(sk, skb); @@@ -5988,8 -5666,7 +5668,8 @@@ int tcp_rcv_state_process(struct sock *
/* step 5: check the ACK field */ if (true) { - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; + int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | + FLAG_UPDATE_TS_RECENT) > 0;
switch (sk->sk_state) { case TCP_SYN_RECV: @@@ -6140,6 -5817,11 +5820,6 @@@ } }
- /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 6: check the URG bit */ tcp_urg(sk, skb, th);
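Two notes as the tcp_input.c portion closes. First, the ts_recent update is centralized: callers now pass FLAG_UPDATE_TS_RECENT into tcp_ack(), which runs tcp_replace_ts_recent() only once the packet is known to be in-window, replacing the two standalone call sites removed above. Second, tcp_process_tlp_ack() ends a tail loss probe episode in one of two ways: a pure duplicate ACK at exactly tlp_high_seq means the probe itself was acknowledged and nothing was lost, while an ACK beyond tlp_high_seq means the probe repaired a genuine tail loss, so one cwnd reduction is applied through a compressed CWR cycle unless a DSACK shows the probe retransmission was spurious. The dupack classification, modeled with the flag values from this file:

/* Sketch: classifying an ACK during a TLP episode, after
 * tcp_process_tlp_ack(). Flag values mirror tcp_input.c.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_DATA             0x01
#define FLAG_WIN_UPDATE       0x02
#define FLAG_DATA_ACKED       0x04
#define FLAG_SYN_ACKED        0x10
#define FLAG_DATA_SACKED      0x20
#define FLAG_SND_UNA_ADVANCED 0x400
#define FLAG_ACKED   (FLAG_DATA_ACKED | FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA | FLAG_WIN_UPDATE | FLAG_ACKED)

static bool tlp_dupack(unsigned int ack, unsigned int tlp_high_seq, int flag)
{
    return ack == tlp_high_seq &&
           !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP | FLAG_DATA_SACKED));
}

int main(void)
{
    /* Pure dupack at tlp_high_seq: probe acked, episode over, no CWR. */
    printf("%d\n", tlp_dupack(1000, 1000, 0));                  /* 1 */
    /* Cumulative ACK advancing snd_una: not a TLP dupack. */
    printf("%d\n", tlp_dupack(1000, 1000,
                              FLAG_SND_UNA_ADVANCED | FLAG_DATA_ACKED)); /* 0 */
    return 0;
}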
diff --combined net/ipv4/tcp_ipv4.c index fc55a1c,2278669..8667aaa --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@@ -838,7 -838,6 +838,6 @@@ static void tcp_v4_reqsk_send_ack(struc */ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct request_values *rvp, u16 queue_mapping, bool nocache) { @@@ -851,7 -850,7 +850,7 @@@ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1;
- skb = tcp_make_synack(sk, dst, req, rvp, NULL); + skb = tcp_make_synack(sk, dst, req, NULL);
if (skb) { __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); @@@ -868,10 -867,9 +867,9 @@@ return err; }
- static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, - struct request_values *rvp) + static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req) { - int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false); + int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
if (!res) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); @@@ -1371,8 -1369,7 +1369,7 @@@ static bool tcp_fastopen_check(struct s static int tcp_v4_conn_req_fastopen(struct sock *sk, struct sk_buff *skb, struct sk_buff *skb_synack, - struct request_sock *req, - struct request_values *rvp) + struct request_sock *req) { struct tcp_sock *tp = tcp_sk(sk); struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; @@@ -1467,9 -1464,7 +1464,7 @@@
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { - struct tcp_extend_values tmp_ext; struct tcp_options_received tmp_opt; - const u8 *hash_location; struct request_sock *req; struct inet_request_sock *ireq; struct tcp_sock *tp = tcp_sk(sk); @@@ -1519,42 -1514,7 +1514,7 @@@ tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = TCP_MSS_DEFAULT; tmp_opt.user_mss = tp->rx_opt.user_mss; - tcp_parse_options(skb, &tmp_opt, &hash_location, 0, - want_cookie ? NULL : &foc); - - if (tmp_opt.cookie_plus > 0 && - tmp_opt.saw_tstamp && - !tp->rx_opt.cookie_out_never && - (sysctl_tcp_cookie_size > 0 || - (tp->cookie_values != NULL && - tp->cookie_values->cookie_desired > 0))) { - u8 *c; - u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS]; - int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE; - - if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0) - goto drop_and_release; - - /* Secret recipe starts with IP addresses */ - *mess++ ^= (__force u32)daddr; - *mess++ ^= (__force u32)saddr; - - /* plus variable length Initiator Cookie */ - c = (u8 *)mess; - while (l-- > 0) - *c++ ^= *hash_location++; - - want_cookie = false; /* not our kind of cookie */ - tmp_ext.cookie_out_never = 0; /* false */ - tmp_ext.cookie_plus = tmp_opt.cookie_plus; - } else if (!tp->rx_opt.cookie_in_always) { - /* redundant indications, but ensure initialization. */ - tmp_ext.cookie_out_never = 1; /* true */ - tmp_ext.cookie_plus = 0; - } else { - goto drop_and_release; - } - tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always; + tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); @@@ -1636,7 -1596,6 +1596,6 @@@ * of tcp_v4_send_synack()->tcp_select_initial_window(). */ skb_synack = tcp_make_synack(sk, dst, req, - (struct request_values *)&tmp_ext, fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
if (skb_synack) { @@@ -1660,8 -1619,7 +1619,7 @@@ if (fastopen_cookie_present(&foc) && foc.len != 0) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); - } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req, - (struct request_values *)&tmp_ext)) + } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req)) goto drop_and_free;
return 0; @@@ -1950,6 -1908,50 +1908,50 @@@ void tcp_v4_early_demux(struct sk_buff } }
+ /* Packet is added to VJ-style prequeue for processing in process + * context, if a reader task is waiting. Apparently, this exciting + * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) + * failed somewhere. Latency? Burstiness? Well, at least now we will + * see, why it failed. 8)8) --ANK + * + */ + bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) + { + struct tcp_sock *tp = tcp_sk(sk); + + if (sysctl_tcp_low_latency || !tp->ucopy.task) + return false; + + if (skb->len <= tcp_hdrlen(skb) && + skb_queue_len(&tp->ucopy.prequeue) == 0) + return false; + + __skb_queue_tail(&tp->ucopy.prequeue, skb); + tp->ucopy.memory += skb->truesize; + if (tp->ucopy.memory > sk->sk_rcvbuf) { + struct sk_buff *skb1; + + BUG_ON(sock_owned_by_user(sk)); + + while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { + sk_backlog_rcv(sk, skb1); + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPPREQUEUEDROPPED); + } + + tp->ucopy.memory = 0; + } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { + wake_up_interruptible_sync_poll(sk_sleep(sk), + POLLIN | POLLRDNORM | POLLRDBAND); + if (!inet_csk_ack_scheduled(sk)) + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, + (3 * tcp_rto_min(sk)) / 4, + TCP_RTO_MAX); + } + return true; + } + EXPORT_SYMBOL(tcp_prequeue); + /* * From tcp_input.c */ @@@ -2197,12 -2199,6 +2199,6 @@@ void tcp_v4_destroy_sock(struct sock *s if (inet_csk(sk)->icsk_bind_hash) inet_put_port(sk);
- /* TCP Cookie Transactions */ - if (tp->cookie_values != NULL) { - kref_put(&tp->cookie_values->kref, - tcp_cookie_values_release); - tp->cookie_values = NULL; - } BUG_ON(tp->fastopen_rsk != NULL);
/* If socket is aborted during connect operation */ @@@ -2580,7 -2576,7 +2576,7 @@@ static void tcp_seq_stop(struct seq_fil
int tcp_seq_open(struct inode *inode, struct file *file) { - struct tcp_seq_afinfo *afinfo = PDE(inode)->data; + struct tcp_seq_afinfo *afinfo = PDE_DATA(inode); struct tcp_iter_state *s; int err;
@@@ -2659,7 -2655,9 +2655,9 @@@ static void get_tcp4_sock(struct sock * __u16 srcp = ntohs(inet->inet_sport); int rx_queue;
- if (icsk->icsk_pending == ICSK_TIME_RETRANS) { + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { diff --combined net/ipv4/tcp_output.c index 509912a,5f28131..b735c23 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -65,27 -65,22 +65,22 @@@ int sysctl_tcp_base_mss __read_mostly /* By default, RFC2861 behavior. */ int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
- int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */ - EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size); - static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp);
/* Account for new data that has been sent to the network. */ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); unsigned int prior_packets = tp->packets_out;
tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
- /* Don't override Nagle indefinitely with F-RTO */ - if (tp->frto_counter == 2) - tp->frto_counter = 3; - tp->packets_out += tcp_skb_pcount(skb); - if (!prior_packets || tp->early_retrans_delayed) + if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); }
@@@ -384,7 -379,6 +379,6 @@@ static inline bool tcp_urg_mode(const s #define OPTION_TS (1 << 1) #define OPTION_MD5 (1 << 2) #define OPTION_WSCALE (1 << 3) - #define OPTION_COOKIE_EXTENSION (1 << 4) #define OPTION_FAST_OPEN_COOKIE (1 << 8)
struct tcp_out_options { @@@ -398,36 -392,6 +392,6 @@@ struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ };
- /* The sysctl int routines are generic, so check consistency here. - */ - static u8 tcp_cookie_size_check(u8 desired) - { - int cookie_size; - - if (desired > 0) - /* previously specified */ - return desired; - - cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); - if (cookie_size <= 0) - /* no default specified */ - return 0; - - if (cookie_size <= TCP_COOKIE_MIN) - /* value too small, specify minimum */ - return TCP_COOKIE_MIN; - - if (cookie_size >= TCP_COOKIE_MAX) - /* value too large, specify maximum */ - return TCP_COOKIE_MAX; - - if (cookie_size & 1) - /* 8-bit multiple, illegal, fix it */ - cookie_size++; - - return (u8)cookie_size; - } - /* Write previously computed TCP options to the packet. * * Beware: Something in the Internet is very sensitive to the ordering of @@@ -446,27 -410,9 +410,9 @@@ static void tcp_options_write(__be32 *p { u16 options = opts->options; /* mungable copy */
- /* Having both authentication and cookies for security is redundant, - * and there's certainly not enough room. Instead, the cookie-less - * extension variant is proposed. - * - * Consider the pessimal case with authentication. The options - * could look like: - * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40 - */ if (unlikely(OPTION_MD5 & options)) { - if (unlikely(OPTION_COOKIE_EXTENSION & options)) { - *ptr++ = htonl((TCPOPT_COOKIE << 24) | - (TCPOLEN_COOKIE_BASE << 16) | - (TCPOPT_MD5SIG << 8) | - TCPOLEN_MD5SIG); - } else { - *ptr++ = htonl((TCPOPT_NOP << 24) | - (TCPOPT_NOP << 16) | - (TCPOPT_MD5SIG << 8) | - TCPOLEN_MD5SIG); - } - options &= ~OPTION_COOKIE_EXTENSION; + *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* overload cookie hash location */ opts->hash_location = (__u8 *)ptr; ptr += 4; @@@ -495,44 -441,6 +441,6 @@@ *ptr++ = htonl(opts->tsecr); }
- /* Specification requires after timestamp, so do it now. - * - * Consider the pessimal case without authentication. The options - * could look like: - * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40 - */ - if (unlikely(OPTION_COOKIE_EXTENSION & options)) { - __u8 *cookie_copy = opts->hash_location; - u8 cookie_size = opts->hash_size; - - /* 8-bit multiple handled in tcp_cookie_size_check() above, - * and elsewhere. - */ - if (0x2 & cookie_size) { - __u8 *p = (__u8 *)ptr; - - /* 16-bit multiple */ - *p++ = TCPOPT_COOKIE; - *p++ = TCPOLEN_COOKIE_BASE + cookie_size; - *p++ = *cookie_copy++; - *p++ = *cookie_copy++; - ptr++; - cookie_size -= 2; - } else { - /* 32-bit multiple */ - *ptr++ = htonl(((TCPOPT_NOP << 24) | - (TCPOPT_NOP << 16) | - (TCPOPT_COOKIE << 8) | - TCPOLEN_COOKIE_BASE) + - cookie_size); - } - - if (cookie_size > 0) { - memcpy(ptr, cookie_copy, cookie_size); - ptr += (cookie_size / 4); - } - } - if (unlikely(OPTION_SACK_ADVERTISE & options)) { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | @@@ -591,11 -499,7 +499,7 @@@ static unsigned int tcp_syn_options(str struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_cookie_values *cvp = tp->cookie_values; unsigned int remaining = MAX_TCP_OPTION_SPACE; - u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? - tcp_cookie_size_check(cvp->cookie_desired) : - 0; struct tcp_fastopen_request *fastopen = tp->fastopen_req;
#ifdef CONFIG_TCP_MD5SIG @@@ -647,52 -551,7 +551,7 @@@ tp->syn_fastopen = 1; } } - /* Note that timestamps are required by the specification. - * - * Odd numbers of bytes are prohibited by the specification, ensuring - * that the cookie is 16-bit aligned, and the resulting cookie pair is - * 32-bit aligned. - */ - if (*md5 == NULL && - (OPTION_TS & opts->options) && - cookie_size > 0) { - int need = TCPOLEN_COOKIE_BASE + cookie_size; - - if (0x2 & need) { - /* 32-bit multiple */ - need += 2; /* NOPs */ - - if (need > remaining) { - /* try shrinking cookie to fit */ - cookie_size -= 2; - need -= 4; - } - } - while (need > remaining && TCP_COOKIE_MIN <= cookie_size) { - cookie_size -= 4; - need -= 4; - } - if (TCP_COOKIE_MIN <= cookie_size) { - opts->options |= OPTION_COOKIE_EXTENSION; - opts->hash_location = (__u8 *)&cvp->cookie_pair[0]; - opts->hash_size = cookie_size; - - /* Remember for future incarnations. */ - cvp->cookie_desired = cookie_size; - - if (cvp->cookie_desired != cvp->cookie_pair_size) { - /* Currently use random bytes as a nonce, - * assuming these are completely unpredictable - * by hostile users of the same system. - */ - get_random_bytes(&cvp->cookie_pair[0], - cookie_size); - cvp->cookie_pair_size = cookie_size; - }
- remaining -= need; - } - } return MAX_TCP_OPTION_SPACE - remaining; }
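A note on the option-space arithmetic behind the deletions above: as the removed comments observed, MD5 authentication plus the standard SYN options already fills the 40-byte TCP option space, so the cookie extension could never coexist with it. A minimal sketch of that accounting (the byte counts are the *_ALIGNED constants from include/net/tcp.h; the function itself is illustrative, not part of the patch):

/* Pessimal SYN layout with authentication: exactly fills the option space. */
static int syn_option_bytes_with_md5(void)
{
	int used = 0;

	used += 20;	/* TCPOLEN_MD5SIG_ALIGNED: 2 NOPs + kind/len + 16-byte digest */
	used += 4;	/* TCPOLEN_MSS_ALIGNED */
	used += 12;	/* TCPOLEN_TSTAMP_ALIGNED: SACK_PERM shares these 12 bytes with TS */
	used += 4;	/* TCPOLEN_WSCALE_ALIGNED: NOP + 3-byte window scale */

	return used;	/* 40 == MAX_TCP_OPTION_SPACE, nothing left over */
}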
@@@ -702,14 -561,10 +561,10 @@@ static unsigned int tcp_synack_options( unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5, - struct tcp_extend_values *xvp, struct tcp_fastopen_cookie *foc) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; - u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? - xvp->cookie_plus : - 0;
#ifdef CONFIG_TCP_MD5SIG *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); @@@ -757,28 -612,7 +612,7 @@@ remaining -= need; } } - /* Similar rationale to tcp_syn_options() applies here, too. - * If the <SYN> options fit, the same options should fit now! - */ - if (*md5 == NULL && - ireq->tstamp_ok && - cookie_plus > TCPOLEN_COOKIE_BASE) { - int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ - - if (0x2 & need) { - /* 32-bit multiple */ - need += 2; /* NOPs */ - } - if (need <= remaining) { - opts->options |= OPTION_COOKIE_EXTENSION; - opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE; - remaining -= need; - } else { - /* There's no error return, so flag it. */ - xvp->cookie_out_never = 1; /* true */ - opts->hash_size = 0; - } - } + return MAX_TCP_OPTION_SPACE - remaining; }
@@@ -953,7 -787,7 +787,7 @@@ void __init tcp_tasklet_init(void * We can't xmit new skbs from this context, as we might already * hold qdisc lock. */ - static void tcp_wfree(struct sk_buff *skb) + void tcp_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); @@@ -1012,6 -846,13 +846,13 @@@ static int tcp_transmit_skb(struct soc __net_timestamp(skb);
if (likely(clone_it)) { + const struct sk_buff *fclone = skb + 1; + + if (unlikely(skb->fclone == SKB_FCLONE_ORIG && + fclone->fclone == SKB_FCLONE_CLONE)) + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); + if (unlikely(skb_cloned(skb))) skb = pskb_copy(skb, gfp_mask); else @@@ -1632,11 -1473,8 +1473,8 @@@ static inline bool tcp_nagle_test(cons if (nonagle & TCP_NAGLE_PUSH) return true;
- /* Don't use the nagle rule for urgent data (or for the final FIN). - * Nagle can be ignored during F-RTO too (see RFC4138). - */ - if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || - (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) + /* Don't use the nagle rule for urgent data (or for the final FIN). */ + if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) return true;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) @@@ -1961,6 -1799,9 +1799,9 @@@ static int tcp_mtu_probe(struct sock *s * snd_up-64k-mss .. snd_up cannot be large. However, taking into * account rare use of URG, this is not a big flaw. * + * Send at most one packet when push_one > 0. Temporarily ignore + * cwnd limit to force at most one packet out when push_one == 2. + * Returns true, if no segments are in flight and we have queued segments, * but cannot send anything now because of SWS or another problem. */ @@@ -1996,8 -1837,13 +1837,13 @@@ static bool tcp_write_xmit(struct sock goto repair; /* Skip network transmission */
cwnd_quota = tcp_cwnd_test(tp, skb); - if (!cwnd_quota) - break; + if (!cwnd_quota) { + if (push_one == 2) + /* Force out a loss probe pkt. */ + cwnd_quota = 1; + else + break; + }
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; @@@ -2051,10 -1897,129 +1897,129 @@@ repair if (likely(sent_pkts)) { if (tcp_in_cwnd_reduction(sk)) tp->prr_out += sent_pkts; + + /* Send one loss probe per tail loss episode. */ + if (push_one != 2) + tcp_schedule_loss_probe(sk); tcp_cwnd_validate(sk); return false; } - return !tp->packets_out && tcp_send_head(sk); + return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); + } + + bool tcp_schedule_loss_probe(struct sock *sk) + { + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + u32 timeout, tlp_time_stamp, rto_time_stamp; + u32 rtt = tp->srtt >> 3; + + if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) + return false; + /* No consecutive loss probes. */ + if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { + tcp_rearm_rto(sk); + return false; + } + /* Don't do any loss probe on a Fast Open connection before 3WHS + * finishes. + */ + if (sk->sk_state == TCP_SYN_RECV) + return false; + + /* TLP is only scheduled when next timer event is RTO. */ + if (icsk->icsk_pending != ICSK_TIME_RETRANS) + return false; + + /* Schedule a loss probe in 2*RTT for SACK capable connections + * in Open state, that are either limited by cwnd or application. + */ + if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || + !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) + return false; + + if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && + tcp_send_head(sk)) + return false; + + /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account + * for delayed ack when there's one outstanding packet. + */ + timeout = rtt << 1; + if (tp->packets_out == 1) + timeout = max_t(u32, timeout, + (rtt + (rtt >> 1) + TCP_DELACK_MAX)); + timeout = max_t(u32, timeout, msecs_to_jiffies(10)); + + /* If RTO is shorter, just schedule TLP in its place. */ + tlp_time_stamp = tcp_time_stamp + timeout; + rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; + if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { + s32 delta = rto_time_stamp - tcp_time_stamp; + if (delta > 0) + timeout = delta; + } + + inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, + TCP_RTO_MAX); + return true; + } + + /* When probe timeout (PTO) fires, send a new segment if one exists, else + * retransmit the last segment. + */ + void tcp_send_loss_probe(struct sock *sk) + { + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + int pcount; + int mss = tcp_current_mss(sk); + int err = -1; + + if (tcp_send_head(sk) != NULL) { + err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); + goto rearm_timer; + } + + /* At most one outstanding TLP retransmission. */ + if (tp->tlp_high_seq) + goto rearm_timer; + + /* Retransmit last segment. */ + skb = tcp_write_queue_tail(sk); + if (WARN_ON(!skb)) + goto rearm_timer; + + pcount = tcp_skb_pcount(skb); + if (WARN_ON(!pcount)) + goto rearm_timer; + + if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { + if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) + goto rearm_timer; + skb = tcp_write_queue_tail(sk); + } + + if (WARN_ON(!skb || !tcp_skb_pcount(skb))) + goto rearm_timer; + + /* Probe with zero data doesn't trigger fast recovery. */ + if (skb->len > 0) + err = __tcp_retransmit_skb(sk, skb); + + /* Record snd_nxt for loss detection. */ + if (likely(!err)) + tp->tlp_high_seq = tp->snd_nxt; + + rearm_timer: + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + inet_csk(sk)->icsk_rto, + TCP_RTO_MAX); + + if (likely(!err)) + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPLOSSPROBES); + return; }
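To make the probe-timeout (PTO) arithmetic above easier to follow, here is an illustrative restatement of the computation in tcp_schedule_loss_probe(); srtt is tp->srtt >> 3 in jiffies and max_delack stands in for TCP_DELACK_MAX (names chosen here, not from the patch):

/* PTO is 2*SRTT by default, inflated to 1.5*SRTT plus a delayed-ACK
 * allowance when only one packet is outstanding, with a 10ms floor.
 * The caller additionally clamps it so the probe never fires later
 * than the already-pending RTO.
 */
static u32 tlp_probe_timeout(u32 srtt, u32 packets_out, u32 max_delack)
{
	u32 timeout = srtt << 1;

	if (packets_out == 1)
		timeout = max_t(u32, timeout, srtt + (srtt >> 1) + max_delack);

	return max_t(u32, timeout, msecs_to_jiffies(10));
}

Worked example: with SRTT = 50ms and several packets in flight the probe fires after 100ms; with a single outstanding packet and TCP_DELACK_MAX = 200ms it fires after 275ms instead.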
/* Push out any pending frames which were held back due to @@@ -2388,12 -2353,8 +2353,12 @@@ int __tcp_retransmit_skb(struct sock *s */ TCP_SKB_CB(skb)->when = tcp_time_stamp;
- /* make sure skb->data is aligned on arches that require it */ - if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { + /* make sure skb->data is aligned on arches that require it + * and check if ack-trimming & collapsing extended the headroom + * beyond what csum_start can cover. + */ + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || + skb_headroom(skb) >= 0xFFFF)) { struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : @@@ -2679,32 -2640,24 +2644,24 @@@ int tcp_send_synack(struct sock *sk * sk: listener socket * dst: dst entry attached to the SYNACK * req: request_sock pointer - * rvp: request_values pointer * * Allocate one skb and build a SYNACK packet. * @dst is consumed : Caller should not use it again. */ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, struct tcp_fastopen_cookie *foc) { struct tcp_out_options opts; - struct tcp_extend_values *xvp = tcp_xv(rvp); struct inet_request_sock *ireq = inet_rsk(req); struct tcp_sock *tp = tcp_sk(sk); - const struct tcp_cookie_values *cvp = tp->cookie_values; struct tcphdr *th; struct sk_buff *skb; struct tcp_md5sig_key *md5; int tcp_header_size; int mss; - int s_data_desired = 0;
- if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) - s_data_desired = cvp->s_data_desired; - skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, - sk_gfp_atomic(sk, GFP_ATOMIC)); + skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); if (unlikely(!skb)) { dst_release(dst); return NULL; @@@ -2713,7 -2666,6 +2670,7 @@@ skb_reserve(skb, MAX_TCP_HEADER);
skb_dst_set(skb, dst); + security_skb_owned_by(skb, sk);
mss = dst_metric_advmss(dst); if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) @@@ -2747,9 -2699,8 +2704,8 @@@ else #endif TCP_SKB_CB(skb)->when = tcp_time_stamp; - tcp_header_size = tcp_synack_options(sk, req, mss, - skb, &opts, &md5, xvp, foc) - + sizeof(*th); + tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, + foc) + sizeof(*th);
skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); @@@ -2767,40 -2718,6 +2723,6 @@@ tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, TCPHDR_SYN | TCPHDR_ACK);
- if (OPTION_COOKIE_EXTENSION & opts.options) { - if (s_data_desired) { - u8 *buf = skb_put(skb, s_data_desired); - - /* copy data directly from the listening socket. */ - memcpy(buf, cvp->s_data_payload, s_data_desired); - TCP_SKB_CB(skb)->end_seq += s_data_desired; - } - - if (opts.hash_size > 0) { - __u32 workspace[SHA_WORKSPACE_WORDS]; - u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS]; - u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1]; - - /* Secret recipe depends on the Timestamp, (future) - * Sequence and Acknowledgment Numbers, Initiator - * Cookie, and others handled by IP variant caller. - */ - *tail-- ^= opts.tsval; - *tail-- ^= tcp_rsk(req)->rcv_isn + 1; - *tail-- ^= TCP_SKB_CB(skb)->seq + 1; - - /* recommended */ - *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); - *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ - - sha_transform((__u32 *)&xvp->cookie_bakery[0], - (char *)mess, - &workspace[0]); - opts.hash_location = - (__u8 *)&xvp->cookie_bakery[0]; - } - } - th->seq = htonl(TCP_SKB_CB(skb)->seq); /* XXX data is queued and acked as is. No buffer/window check */ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); diff --combined net/ipv4/udp.c index d272643,2722db0..d035cea --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@@ -902,9 -902,9 +902,9 @@@ int udp_sendmsg(struct kiocb *iocb, str ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if; - err = sock_tx_timestamp(sk, &ipc.tx_flags); - if (err) - return err; + + sock_tx_timestamp(sk, &ipc.tx_flags); + if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) @@@ -2093,7 -2093,7 +2093,7 @@@ static void udp_seq_stop(struct seq_fil
int udp_seq_open(struct inode *inode, struct file *file) { - struct udp_seq_afinfo *afinfo = PDE(inode)->data; + struct udp_seq_afinfo *afinfo = PDE_DATA(inode); struct udp_iter_state *s; int err;
@@@ -2279,31 -2279,88 +2279,88 @@@ void __init udp_init(void
int udp4_ufo_send_check(struct sk_buff *skb) { - const struct iphdr *iph; - struct udphdr *uh; - - if (!pskb_may_pull(skb, sizeof(*uh))) + if (!pskb_may_pull(skb, sizeof(struct udphdr))) return -EINVAL;
- iph = ip_hdr(skb); - uh = udp_hdr(skb); + if (likely(!skb->encapsulation)) { + const struct iphdr *iph; + struct udphdr *uh; + + iph = ip_hdr(skb); + uh = udp_hdr(skb);
- uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, + IPPROTO_UDP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + } return 0; }
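For readers new to the CHECKSUM_PARTIAL contract being set up above: uh->check is seeded with the inverted pseudo-header sum, and whoever completes the packet (the NIC, or the software UFO fallback later in this hunk) checksums the payload from csum_start onward and stores the folded result at csum_start + csum_offset. A sketch of that completion step, mirroring the fallback code below (illustrative helper, not part of the patch):

/* Software completion of a CHECKSUM_PARTIAL skb. */
static void finish_partial_csum(struct sk_buff *skb)
{
	int start = skb_checksum_start_offset(skb);
	__wsum csum = skb_checksum(skb, start, skb->len - start, 0);

	/* fold and store into the UDP check field */
	*(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
}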
+ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) + { + struct sk_buff *segs = ERR_PTR(-EINVAL); + int mac_len = skb->mac_len; + int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + int outer_hlen; + netdev_features_t enc_features; + + if (unlikely(!pskb_may_pull(skb, tnl_hlen))) + goto out; + + skb->encapsulation = 0; + __skb_pull(skb, tnl_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb_inner_network_offset(skb)); + skb->mac_len = skb_inner_network_offset(skb); + + /* segment inner packet. */ + enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + segs = skb_mac_gso_segment(skb, enc_features); + if (!segs || IS_ERR(segs)) + goto out; + + outer_hlen = skb_tnl_header_len(skb); + skb = segs; + do { + struct udphdr *uh; + int udp_offset = outer_hlen - tnl_hlen; + + skb->mac_len = mac_len; + + skb_push(skb, outer_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, mac_len); + skb_set_transport_header(skb, udp_offset); + uh = udp_hdr(skb); + uh->len = htons(skb->len - udp_offset); + + /* csum segment if tunnel sets skb with csum. */ + if (unlikely(uh->check)) { + struct iphdr *iph = ip_hdr(skb); + + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - udp_offset, + IPPROTO_UDP, 0); + uh->check = csum_fold(skb_checksum(skb, udp_offset, + skb->len - udp_offset, 0)); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + } + skb->ip_summed = CHECKSUM_NONE; + } while ((skb = skb->next)); + out: + return segs; + } + struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; - int offset; - __wsum csum; - mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; @@@ -2313,6 -2370,7 +2370,7 @@@ int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | + SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE) || !(type & (SKB_GSO_UDP)))) goto out; @@@ -2323,20 -2381,27 +2381,27 @@@ goto out; }
/* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() */ - segs = skb_segment(skb, features); + if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) + segs = skb_udp_tunnel_segment(skb, features); + else { + int offset; + __wsum csum; + + /* Do software UFO. Complete and fill in the UDP checksum as + * HW cannot do checksum of UDP packets sent as multiple + * IP fragments. + */ + offset = skb_checksum_start_offset(skb); + csum = skb_checksum(skb, offset, skb->len - offset, 0); + offset += skb->csum_offset; + *(__sum16 *)(skb->data + offset) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; + + segs = skb_segment(skb, features); + } out: return segs; } - diff --combined net/ipv6/addrconf.c index dae802c,28b61e8..d1ab6ab --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -70,6 -70,7 +70,7 @@@ #include <net/snmp.h>
#include <net/af_ieee802154.h> + #include <net/firewire.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/ndisc.h> @@@ -168,6 -169,8 +169,6 @@@ static void inet6_prefix_notify(int eve static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, struct net_device *dev);
-static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); - static struct ipv6_devconf ipv6_devconf __read_mostly = { .forwarding = 0, .hop_limit = IPV6_DEFAULT_HOPLIMIT, @@@ -419,6 -422,7 +420,7 @@@ static struct inet6_dev *ipv6_add_dev(s ipv6_regen_rndid((unsigned long) ndev); } #endif + ndev->token = in6addr_any;
if (netif_running(dev) && addrconf_qdisc_ok(dev)) ndev->if_flags |= IF_READY; @@@ -542,8 -546,7 +544,7 @@@ static const struct nla_policy devconf_ };
static int inet6_netconf_get_devconf(struct sk_buff *in_skb, - struct nlmsghdr *nlh, - void *arg) + struct nlmsghdr *nlh) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX+1]; @@@ -603,6 -606,77 +604,77 @@@ errout return err; }
+ static int inet6_netconf_dump_devconf(struct sk_buff *skb, + struct netlink_callback *cb) + { + struct net *net = sock_net(skb->sk); + int h, s_h; + int idx, s_idx; + struct net_device *dev; + struct inet6_dev *idev; + struct hlist_head *head; + + s_h = cb->args[0]; + s_idx = idx = cb->args[1]; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); + cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ + net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + idev = __in6_dev_get(dev); + if (!idev) + goto cont; + + if (inet6_netconf_fill_devconf(skb, dev->ifindex, + &idev->cnf, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, + NLM_F_MULTI, + -1) <= 0) { + rcu_read_unlock(); + goto done; + } + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + cont: + idx++; + } + rcu_read_unlock(); + } + if (h == NETDEV_HASHENTRIES) { + if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, + net->ipv6.devconf_all, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } + if (h == NETDEV_HASHENTRIES + 1) { + if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, + net->ipv6.devconf_dflt, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } + done: + cb->args[0] = h; + cb->args[1] = idx; + + return skb->len; + } + #ifdef CONFIG_SYSCTL static void dev_forward_change(struct inet6_dev *idev) { @@@ -804,6 -878,7 +876,7 @@@ ipv6_add_addr(struct inet6_dev *idev, c ifa->prefix_len = pfxlen; ifa->flags = flags | IFA_F_TENTATIVE; ifa->cstamp = ifa->tstamp = jiffies; + ifa->tokenized = false;
ifa->rt = rt;
@@@ -835,7 -910,7 +908,7 @@@ out2 rcu_read_unlock_bh();
if (likely(err == 0)) - atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); + inet6addr_notifier_call_chain(NETDEV_UP, ifa); else { kfree(ifa); ifa = ERR_PTR(err); @@@ -925,7 -1000,7 +998,7 @@@ static void ipv6_del_addr(struct inet6_
ipv6_ifa_notify(RTM_DELADDR, ifp);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); + inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
/* * Purge or update corresponding prefix @@@ -1666,6 -1741,20 +1739,20 @@@ static int addrconf_ifid_eui64(u8 *eui return 0; }
+ static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev) + { + union fwnet_hwaddr *ha; + + if (dev->addr_len != FWNET_ALEN) + return -1; + + ha = (union fwnet_hwaddr *)dev->dev_addr; + + memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id)); + eui[0] ^= 2; + return 0; + } + static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) { /* XXX: inherit EUI-64 from other interface -- yoshfuji */ @@@ -1730,6 -1819,8 +1817,8 @@@ static int ipv6_generate_eui64(u8 *eui return addrconf_ifid_gre(eui, dev); case ARPHRD_IEEE802154: return addrconf_ifid_eui64(eui, dev); + case ARPHRD_IEEE1394: + return addrconf_ifid_ieee1394(eui, dev); } return -1; } @@@ -2044,11 -2135,19 +2133,19 @@@ void addrconf_prefix_rcv(struct net_dev struct inet6_ifaddr *ifp; struct in6_addr addr; int create = 0, update_lft = 0; + bool tokenized = false;
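A quick worked example of the identifier derivation above, with an invented FireWire unique id:

/* addrconf_ifid_ieee1394(), illustrated:
 *
 *   uniq_id:             00:11:22:33:44:55:66:77
 *   eui[0] ^= 2          flips the universal/local bit (RFC 4291, App. A)
 *   interface id:        02:11:22:33:44:55:66:77
 *   link-local address:  fe80::211:2233:4455:6677
 */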
if (pinfo->prefix_len == 64) { memcpy(&addr, &pinfo->prefix, 8); - if (ipv6_generate_eui64(addr.s6_addr + 8, dev) && - ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) { + + if (!ipv6_addr_any(&in6_dev->token)) { + read_lock_bh(&in6_dev->lock); + memcpy(addr.s6_addr + 8, + in6_dev->token.s6_addr + 8, 8); + read_unlock_bh(&in6_dev->lock); + tokenized = true; + } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) && + ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) { in6_dev_put(in6_dev); return; } @@@ -2089,6 -2188,7 +2186,7 @@@ ok
update_lft = create = 1; ifp->cstamp = jiffies; + ifp->tokenized = tokenized; addrconf_dad_start(ifp); }
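The tokenized path above is easier to see with concrete values (prefix and token invented; the token is set through the IFLA_INET6_TOKEN attribute added later in this diff):

/* Token ::1234:5678:9abc:def0 configured on the device; an RA then
 * advertises prefix 2001:db8:1:2::/64. The low 64 bits come from the
 * token instead of an EUI-64 identifier:
 *
 *   resulting address: 2001:db8:1:2:1234:5678:9abc:def0
 *
 * ifp->tokenized marks such addresses so that inet6_set_iftoken() can
 * expire them (valid_lft = prefered_lft = 0) when the token changes.
 */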
@@@ -2598,7 -2698,8 +2696,8 @@@ static void addrconf_dev_config(struct (dev->type != ARPHRD_FDDI) && (dev->type != ARPHRD_ARCNET) && (dev->type != ARPHRD_INFINIBAND) && - (dev->type != ARPHRD_IEEE802154)) { + (dev->type != ARPHRD_IEEE802154) && + (dev->type != ARPHRD_IEEE1394)) { /* Alas, we support only Ethernet autoconfiguration. */ return; } @@@ -2986,7 -3087,7 +3085,7 @@@ static int addrconf_ifdown(struct net_d
if (state != INET6_IFADDR_STATE_DEAD) { __ipv6_ifa_notify(RTM_DELADDR, ifa); - atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); + inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); } in6_ifa_put(ifa);
@@@ -3535,7 -3636,7 +3634,7 @@@ static const struct nla_policy ifa_ipv6 };
static int - inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; @@@ -3601,7 -3702,7 +3700,7 @@@ static int inet6_addr_modify(struct ine }
static int - inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; @@@ -3832,6 -3933,7 +3931,7 @@@ static int in6_dump_addrs(struct inet6_ NLM_F_MULTI); if (err <= 0) break; + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); } break; } @@@ -3889,6 -3991,7 +3989,7 @@@ static int inet6_dump_addr(struct sk_bu s_ip_idx = ip_idx = cb->args[2];
rcu_read_lock(); + cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq; for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &net->dev_index_head[h]; @@@ -3940,8 -4043,7 +4041,7 @@@ static int inet6_dump_ifacaddr(struct s return inet6_dump_addr(skb, cb, type); }
- static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, - void *arg) + static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh) { struct net *net = sock_net(in_skb->sk); struct ifaddrmsg *ifm; @@@ -4074,7 -4176,8 +4174,8 @@@ static inline size_t inet6_ifla6_size(v + nla_total_size(sizeof(struct ifla_cacheinfo)) + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */ - + nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */ + + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ + + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */ }
static inline size_t inet6_if_nlmsg_size(void) @@@ -4161,6 -4264,13 +4262,13 @@@ static int inet6_fill_ifla6_attrs(struc goto nla_put_failure; snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
+ nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); + if (nla == NULL) + goto nla_put_failure; + read_lock_bh(&idev->lock); + memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla)); + read_unlock_bh(&idev->lock); + return 0;
nla_put_failure: @@@ -4188,6 -4298,80 +4296,80 @@@ static int inet6_fill_link_af(struct sk return 0; }
+ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) + { + struct inet6_ifaddr *ifp; + struct net_device *dev = idev->dev; + bool update_rs = false; + + if (token == NULL) + return -EINVAL; + if (ipv6_addr_any(token)) + return -EINVAL; + if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) + return -EINVAL; + if (!ipv6_accept_ra(idev)) + return -EINVAL; + if (idev->cnf.rtr_solicits <= 0) + return -EINVAL; + + write_lock_bh(&idev->lock); + + BUILD_BUG_ON(sizeof(token->s6_addr) != 16); + memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8); + + write_unlock_bh(&idev->lock); + + if (!idev->dead && (idev->if_flags & IF_READY)) { + struct in6_addr ll_addr; + + ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE | + IFA_F_OPTIMISTIC); + + /* If we're not ready, then normal ifup will take care + * of this. Otherwise, we need to request our rs here. + */ + ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters); + update_rs = true; + } + + write_lock_bh(&idev->lock); + + if (update_rs) + idev->if_flags |= IF_RS_SENT; + + /* Well, that's kinda nasty ... */ + list_for_each_entry(ifp, &idev->addr_list, if_list) { + spin_lock(&ifp->lock); + if (ifp->tokenized) { + ifp->valid_lft = 0; + ifp->prefered_lft = 0; + } + spin_unlock(&ifp->lock); + } + + write_unlock_bh(&idev->lock); + return 0; + } + + static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla) + { + int err = -EINVAL; + struct inet6_dev *idev = __in6_dev_get(dev); + struct nlattr *tb[IFLA_INET6_MAX + 1]; + + if (!idev) + return -EAFNOSUPPORT; + + if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL) < 0) + BUG(); + + if (tb[IFLA_INET6_TOKEN]) + err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN])); + + return err; + } + static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, u32 portid, u32 seq, int event, unsigned int flags) { @@@ -4366,6 -4550,8 +4548,8 @@@ errout
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) { + struct net *net = dev_net(ifp->idev->dev); + inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
switch (event) { @@@ -4391,6 -4577,7 +4575,7 @@@ dst_free(&ifp->rt->dst); break; } + atomic_inc(&net->ipv6.dev_addr_genid); }
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) @@@ -4867,10 -5054,27 +5052,11 @@@ static struct pernet_operations addrcon .exit = addrconf_exit_net, };
-/* - * Device notifier - */ - -int register_inet6addr_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(register_inet6addr_notifier); - -int unregister_inet6addr_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(unregister_inet6addr_notifier); - static struct rtnl_af_ops inet6_ops = { .family = AF_INET6, .fill_link_af = inet6_fill_link_af, .get_link_af_size = inet6_get_link_af_size, + .set_link_af = inet6_set_link_af, };
/* @@@ -4943,7 -5147,7 +5129,7 @@@ int __init addrconf_init(void __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr, NULL); __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf, - NULL, NULL); + inet6_netconf_dump_devconf, NULL);
ipv6_addr_label_rtnl_register();
diff --combined net/ipv6/reassembly.c index 0ba10e5,e6e44ce..790d9f4 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@@ -58,6 -58,7 +58,7 @@@ #include <net/ndisc.h> #include <net/addrconf.h> #include <net/inet_frag.h> + #include <net/inet_ecn.h>
struct ip6frag_skb_cb { @@@ -67,6 -68,10 +68,10 @@@
#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
+ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) + { + return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); + }
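A note on the helper above: the 2-bit ECN codepoint of each fragment becomes a one-hot bit, so fq->ecn can accumulate every codepoint seen across the datagram:

/* ip6_frag_ecn() encoding of (dsfield & INET_ECN_MASK):
 *
 *   Not-ECT (00) -> 0x1      ECT(1) (01) -> 0x2
 *   ECT(0)  (10) -> 0x4      CE     (11) -> 0x8
 *
 * ip6_frag_reasm() indexes ip_frag_ecn_table (added on the IPv4 side by
 * this series) with the OR of these bits: e.g. {ECT(0), CE} folds to CE
 * on the reassembled header, while a set mixing Not-ECT with ECN-capable
 * codepoints yields 0xff and the datagram is dropped, following RFC
 * 3168's reassembly rules.
 */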
static struct inet_frags ip6_frags;
@@@ -119,6 -124,7 +124,7 @@@ void ip6_frag_init(struct inet_frag_que fq->user = arg->user; fq->saddr = *arg->src; fq->daddr = *arg->dst; + fq->ecn = arg->ecn; } EXPORT_SYMBOL(ip6_frag_init);
@@@ -173,7 -179,8 +179,8 @@@ static void ip6_frag_expire(unsigned lo }
static __inline__ struct frag_queue * - fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst) + fq_find(struct net *net, __be32 id, const struct in6_addr *src, + const struct in6_addr *dst, u8 ecn) { struct inet_frag_queue *q; struct ip6_create_arg arg; @@@ -183,6 -190,7 +190,7 @@@ arg.user = IP6_DEFRAG_LOCAL_DELIVER; arg.src = src; arg.dst = dst; + arg.ecn = ecn;
read_lock(&ip6_frags.lock); hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); @@@ -202,6 -210,7 +210,7 @@@ static int ip6_frag_queue(struct frag_q struct net_device *dev; int offset, end; struct net *net = dev_net(skb_dst(skb)->dev); + u8 ecn;
if (fq->q.last_in & INET_FRAG_COMPLETE) goto err; @@@ -219,6 -228,8 +228,8 @@@ return -1; }
+ ecn = ip6_frag_ecn(ipv6_hdr(skb)); + if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, @@@ -319,6 -330,7 +330,7 @@@ found } fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; + fq->ecn |= ecn; add_frag_mem_limit(&fq->q, skb->truesize);
/* The first fragment. @@@ -330,17 -342,9 +342,17 @@@ }
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - fq->q.meat == fq->q.len) - return ip6_frag_reasm(fq, prev, dev); + fq->q.meat == fq->q.len) { + int res; + unsigned long orefdst = skb->_skb_refdst; + + skb->_skb_refdst = 0UL; + res = ip6_frag_reasm(fq, prev, dev); + skb->_skb_refdst = orefdst; + return res; + }
+ skb_dst_drop(skb); inet_frag_lru_move(&fq->q); return -1;
@@@ -370,9 -374,14 +382,14 @@@ static int ip6_frag_reasm(struct frag_q int payload_len; unsigned int nhoff; int sum_truesize; + u8 ecn;
inet_frag_kill(&fq->q, &ip6_frags);
+ ecn = ip_frag_ecn_table[fq->ecn]; + if (unlikely(ecn == 0xff)) + goto out_fail; + /* Make the one we just received the head. */ if (prev) { head = prev->next; @@@ -471,6 -480,7 +488,7 @@@ head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); + ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); IP6CB(head)->nhoff = nhoff;
/* Yes, and fold redundant checksum back. 8) */ @@@ -534,7 -544,8 +552,8 @@@ static int ipv6_frag_rcv(struct sk_buf IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS, evicted);
- fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr); + fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, + ip6_frag_ecn(hdr)); if (fq != NULL) { int ret;
diff --combined net/iucv/af_iucv.c index 206ce6d,e165e8d..ae69165 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@@ -49,6 -49,12 +49,6 @@@ static const u8 iprm_shutdown[8]
#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
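The CB_TAG()/CB_TRGCLS() macros removed just below hand out untyped byte offsets into skb->cb; the IUCV_SKB_CB() view used throughout the rest of this hunk replaces them. A sketch of the af_iucv.h side as this series adds it, with the field layout inferred from the uses in this file:

struct iucv_skb_cb {
	u32	class;	/* target class of message */
	u32	tag;	/* tag associated with message */
	u32	offset;	/* offset for skb receival (SOCK_STREAM) */
};

#define IUCV_SKB_CB(__skb)	((struct iucv_skb_cb *)&((__skb)->cb[0]))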
-/* macros to set/get socket control buffer at correct offset */ -#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */ -#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag)) -#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ -#define CB_TRGCLS_LEN (TRGCLS_SIZE) - #define __iucv_sock_wait(sk, condition, timeo, ret) \ do { \ DEFINE_WAIT(__wait); \ @@@ -1135,7 -1141,7 +1135,7 @@@ static int iucv_sock_sendmsg(struct kio
/* increment and save iucv message tag for msg_completion cbk */ txmsg.tag = iucv->send_tag++; - memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); + IUCV_SKB_CB(skb)->tag = txmsg.tag;
if (iucv->transport == AF_IUCV_TRANS_HIPER) { atomic_inc(&iucv->msg_sent); @@@ -1218,7 -1224,7 +1218,7 @@@ static int iucv_fragment_skb(struct soc return -ENOMEM;
/* copy target class to control buffer of new skb */ - memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); + IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
/* copy data fragment */ memcpy(nskb->data, skb->data + copied, size); @@@ -1250,7 -1256,7 +1250,7 @@@ static void iucv_process_message(struc
/* store msg target class in the second 4 bytes of skb ctrl buffer */ /* Note: the first 4 bytes are reserved for msg tag */ - memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); + IUCV_SKB_CB(skb)->class = msg->class;
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */ if ((msg->flags & IUCV_IPRMDATA) && len > 7) { @@@ -1286,7 -1292,6 +1286,7 @@@ } }
+ IUCV_SKB_CB(skb)->offset = 0; if (sock_queue_rcv_skb(sk, skb)) skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); } @@@ -1322,7 -1327,6 +1322,7 @@@ static int iucv_sock_recvmsg(struct kio unsigned int copied, rlen; struct sk_buff *skb, *rskb, *cskb; int err = 0; + u32 offset;
msg->msg_namelen = 0;
@@@ -1344,14 -1348,13 +1344,14 @@@ return err; }
- rlen = skb->len; /* real length of skb */ + offset = IUCV_SKB_CB(skb)->offset; + rlen = skb->len - offset; /* real length of skb */ copied = min_t(unsigned int, rlen, len); if (!rlen) sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb; - if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { + if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; @@@ -1369,8 -1372,7 +1369,8 @@@ * get the trgcls from the control buffer of the skb due to * fragmentation of original iucv message. */ err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, - CB_TRGCLS_LEN, CB_TRGCLS(skb)); + sizeof(IUCV_SKB_CB(skb)->class), + (void *)&IUCV_SKB_CB(skb)->class); if (err) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); @@@ -1382,8 -1384,9 +1382,8 @@@
/* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM) { - skb_pull(skb, copied); - if (skb->len) { - skb_queue_head(&sk->sk_receive_queue, skb); + if (copied < rlen) { + IUCV_SKB_CB(skb)->offset = offset + copied; goto done; } } @@@ -1402,7 -1405,6 +1402,7 @@@ spin_lock_bh(&iucv->message_q.lock); rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) { + IUCV_SKB_CB(rskb)->offset = 0; if (sock_queue_rcv_skb(sk, rskb)) { skb_queue_head(&iucv->backlog_skb_q, rskb); @@@ -1461,7 -1463,8 +1461,8 @@@ unsigned int iucv_sock_poll(struct fil return iucv_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR; + mask |= POLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP; @@@ -1830,7 -1833,7 +1831,7 @@@ static void iucv_callback_txdone(struc spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) { - if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { + if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { this = list_skb; break; } @@@ -2091,7 -2094,6 +2092,7 @@@ static int afiucv_hs_callback_rx(struc skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); skb_reset_transport_header(skb); skb_reset_network_header(skb); + IUCV_SKB_CB(skb)->offset = 0; spin_lock(&iucv->message_q.lock); if (skb_queue_empty(&iucv->backlog_skb_q)) { if (sock_queue_rcv_skb(sk, skb)) { @@@ -2196,7 -2198,8 +2197,7 @@@ static int afiucv_hs_rcv(struct sk_buf /* fall through and receive zero length data */ case 0: /* plain data frame */ - memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, - CB_TRGCLS_LEN); + IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; err = afiucv_hs_callback_rx(sk, skb); break; default: diff --combined net/mac80211/iface.c index 9ed49ad,69aaba7..e8a260f --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@@ -78,7 -78,7 +78,7 @@@ void ieee80211_recalc_txpower(struct ie ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); }
-u32 ieee80211_idle_off(struct ieee80211_local *local) +static u32 __ieee80211_idle_off(struct ieee80211_local *local) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; @@@ -87,29 -87,27 +87,29 @@@ return IEEE80211_CONF_CHANGE_IDLE; }
-static u32 ieee80211_idle_on(struct ieee80211_local *local) +static u32 __ieee80211_idle_on(struct ieee80211_local *local) { if (local->hw.conf.flags & IEEE80211_CONF_IDLE) return 0;
- drv_flush(local, false); + ieee80211_flush_queues(local, NULL);
local->hw.conf.flags |= IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; }
-void ieee80211_recalc_idle(struct ieee80211_local *local) +static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, + bool force_active) { bool working = false, scanning, active; unsigned int led_trig_start = 0, led_trig_stop = 0; struct ieee80211_roc_work *roc;
lockdep_assert_held(&local->mtx);
- active = !list_empty(&local->chanctx_list) || local->monitors; + active = force_active || + !list_empty(&local->chanctx_list) || + local->monitors;
if (!local->ops->remain_on_channel) { list_for_each_entry(roc, &local->roc_list, list) { @@@ -134,18 -132,9 +134,18 @@@ ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
if (working || scanning || active) - change = ieee80211_idle_off(local); - else - change = ieee80211_idle_on(local); + return __ieee80211_idle_off(local); + return __ieee80211_idle_on(local); +} + +u32 ieee80211_idle_off(struct ieee80211_local *local) +{ + return __ieee80211_recalc_idle(local, true); +} + +void ieee80211_recalc_idle(struct ieee80211_local *local) +{ + u32 change = __ieee80211_recalc_idle(local, false); if (change) ieee80211_hw_config(local, change); } @@@ -499,8 -488,6 +499,6 @@@ int ieee80211_do_open(struct wireless_d res = drv_start(local); if (res) goto err_del_bss; - if (local->ops->napi_poll) - napi_enable(&local->napi); /* we're brought up, everything changes */ hw_reconf_flags = ~0; ieee80211_led_radio(local, true); @@@ -573,8 -560,6 +571,6 @@@ goto err_del_interface; }
- drv_add_interface_debugfs(local, sdata); - if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll++; local->fif_probe_req++; @@@ -852,15 -837,15 +848,15 @@@ static void ieee80211_do_stop(struct ie rcu_barrier(); sta_info_flush_cleanup(sdata);
- skb_queue_purge(&sdata->skb_queue); - /* * Free all remaining keys, there shouldn't be any, - * except maybe group keys in AP more or WDS? + * except maybe in WDS mode? */ ieee80211_free_keys(sdata);
- drv_remove_interface_debugfs(local, sdata); + /* fall through */ + case NL80211_IFTYPE_AP: + skb_queue_purge(&sdata->skb_queue);
if (going_down) drv_remove_interface(local, sdata); @@@ -871,8 -856,6 +867,6 @@@ ieee80211_recalc_ps(local, -1);
if (local->open_count == 0) { - if (local->ops->napi_poll) - napi_disable(&local->napi); ieee80211_clear_tx_pending(local); ieee80211_stop_device(local);
@@@ -935,6 -918,17 +929,17 @@@ static void ieee80211_set_multicast_lis atomic_dec(&local->iff_promiscs); sdata->flags ^= IEEE80211_SDATA_PROMISC; } + + /* + * TODO: If somebody needs this on AP interfaces, + * it can be enabled easily but multicast + * addresses from VLANs need to be synced. + */ + if (sdata->vif.type != NL80211_IFTYPE_MONITOR && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP) + drv_set_multicast_list(local, sdata, &dev->mc); + spin_lock_bh(&local->filter_lock); __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); spin_unlock_bh(&local->filter_lock); @@@ -1561,6 -1555,8 +1566,8 @@@ int ieee80211_if_add(struct ieee80211_l INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk); INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work, ieee80211_dfs_cac_timer_work); + INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk, + ieee80211_delayed_tailroom_dec);
for (i = 0; i < IEEE80211_NUM_BANDS; i++) { struct ieee80211_supported_band *sband; diff --combined net/mac80211/mlme.c index 346ad4c,e06dbbf..dec42ab --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -87,9 -87,6 +87,6 @@@ MODULE_PARM_DESC(probe_wait_ms */ #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
- #define TMR_RUNNING_TIMER 0 - #define TMR_RUNNING_CHANSW 1 - /* * All cfg80211 functions have to be called outside a locked * section so that they can acquire a lock themselves... This @@@ -609,6 -606,7 +606,7 @@@ static void ieee80211_add_vht_ie(struc BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); + ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
/* determine capability flags */ cap = vht_cap.cap; @@@ -1011,6 -1009,7 +1009,7 @@@ static void ieee80211_chswitch_work(str
/* XXX: wait for a beacon first? */ ieee80211_wake_queues_by_reason(&sdata->local->hw, + IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); out: ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; @@@ -1038,14 -1037,8 +1037,8 @@@ static void ieee80211_chswitch_timer(un { struct ieee80211_sub_if_data *sdata = (struct ieee80211_sub_if_data *) data; - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - - if (sdata->local->quiescing) { - set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); - return; - }
- ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); + ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work); }
void @@@ -1116,6 -1109,7 +1109,7 @@@ ieee80211_sta_process_chanswitch(struc
if (sw_elem->mode) ieee80211_stop_queues_by_reason(&sdata->local->hw, + IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA);
if (sdata->local->ops->channel_switch) { @@@ -1383,6 -1377,7 +1377,7 @@@ void ieee80211_dynamic_ps_disable_work( }
ieee80211_wake_queues_by_reason(&local->hw, + IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_PS); }
@@@ -1444,7 -1439,7 +1439,7 @@@ void ieee80211_dynamic_ps_enable_work(s else { ieee80211_send_nullfunc(local, sdata, 1); /* Flush to get the tx status of nullfunc frame */ - drv_flush(local, false); + ieee80211_flush_queues(local, sdata); } }
@@@ -1775,7 -1770,7 +1770,7 @@@ static void ieee80211_set_disassoc(stru
/* flush out any pending frame (e.g. DELBA) before deauth/disassoc */ if (tx) - drv_flush(local, false); + ieee80211_flush_queues(local, sdata);
/* deauthenticate/disassociate now */ if (tx || frame_buf) @@@ -1784,7 -1779,7 +1779,7 @@@
/* flush out frame */ if (tx) - drv_flush(local, false); + ieee80211_flush_queues(local, sdata);
/* clear bssid only after building the needed mgmt frames */ memset(ifmgd->bssid, 0, ETH_ALEN); @@@ -1802,9 -1797,11 +1797,11 @@@ sdata->vif.bss_conf.p2p_ctwindow = 0; sdata->vif.bss_conf.p2p_oppps = false;
- /* on the next assoc, re-program HT parameters */ + /* on the next assoc, re-program HT/VHT parameters */ memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); + memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa)); + memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@@ -1830,8 -1827,6 +1827,6 @@@ del_timer_sync(&sdata->u.mgd.timer); del_timer_sync(&sdata->u.mgd.chswitch_timer);
- sdata->u.mgd.timers_running = 0; - sdata->vif.bss_conf.dtim_period = 0;
ifmgd->flags = 0; @@@ -1956,7 -1951,7 +1951,7 @@@ static void ieee80211_mgd_probe_ap_send ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); run_again(ifmgd, ifmgd->probe_timeout); if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) - drv_flush(sdata->local, false); + ieee80211_flush_queues(sdata->local, sdata); }
static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, @@@ -2079,6 -2074,7 +2074,7 @@@ static void __ieee80211_disconnect(stru true, frame_buf); ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; ieee80211_wake_queues_by_reason(&sdata->local->hw, + IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); mutex_unlock(&ifmgd->mtx);
@@@ -3140,15 -3136,8 +3136,8 @@@ static void ieee80211_sta_timer(unsigne { struct ieee80211_sub_if_data *sdata = (struct ieee80211_sub_if_data *) data; - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct ieee80211_local *local = sdata->local; - - if (local->quiescing) { - set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); - return; - }
- ieee80211_queue_work(&local->hw, &sdata->work); + ieee80211_queue_work(&sdata->local->hw, &sdata->work); }
static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, @@@ -3500,72 -3489,6 +3489,6 @@@ static void ieee80211_restart_sta_timer } }
- #ifdef CONFIG_PM - void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) - { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - - /* - * Stop timers before deleting work items, as timers - * could race and re-add the work-items. They will be - * re-established on connection. - */ - del_timer_sync(&ifmgd->conn_mon_timer); - del_timer_sync(&ifmgd->bcn_mon_timer); - - /* - * we need to use atomic bitops for the running bits - * only because both timers might fire at the same - * time -- the code here is properly synchronised. - */ - - cancel_work_sync(&ifmgd->request_smps_work); - - cancel_work_sync(&ifmgd->monitor_work); - cancel_work_sync(&ifmgd->beacon_connection_loss_work); - cancel_work_sync(&ifmgd->csa_connection_drop_work); - if (del_timer_sync(&ifmgd->timer)) - set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); - - if (del_timer_sync(&ifmgd->chswitch_timer)) - set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); - cancel_work_sync(&ifmgd->chswitch_work); - } - - void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) - { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - - mutex_lock(&ifmgd->mtx); - if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); - return; - } - - if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { - sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; - mlme_dbg(sdata, "driver requested disconnect after resume\n"); - ieee80211_sta_connection_lost(sdata, - ifmgd->associated->bssid, - WLAN_REASON_UNSPECIFIED, - true); - mutex_unlock(&ifmgd->mtx); - return; - } - mutex_unlock(&ifmgd->mtx); - - if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running)) - add_timer(&ifmgd->timer); - if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) - add_timer(&ifmgd->chswitch_timer); - ieee80211_sta_reset_beacon_monitor(sdata); - - mutex_lock(&sdata->local->mtx); - ieee80211_restart_sta_timer(sdata); - mutex_unlock(&sdata->local->mtx); - } - #endif - /* interface setup */ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) { @@@ -3964,16 -3887,8 +3887,16 @@@ int ieee80211_mgd_auth(struct ieee80211 /* prep auth_data so we don't go into idle on disassoc */ ifmgd->auth_data = auth_data;
- if (ifmgd->associated) - ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + __cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); + }
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
@@@ -4033,16 -3948,8 +3956,16 @@@ int ieee80211_mgd_assoc(struct ieee8021
mutex_lock(&ifmgd->mtx);
- if (ifmgd->associated) - ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + __cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); + }
if (ifmgd->auth_data && !ifmgd->auth_data->done) { err = -EBUSY; @@@ -4089,6 -3996,9 +4012,9 @@@ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; }
+ if (req->flags & ASSOC_REQ_DISABLE_VHT) + ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; + /* Also disable HT if we don't support it or the AP doesn't use WMM */ sband = local->hw.wiphy->bands[req->bss->channel->band]; if (!sband->ht_cap.ht_supported || @@@ -4112,6 -4022,10 +4038,10 @@@ memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, sizeof(ifmgd->ht_capa_mask));
+ memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa)); + memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask, + sizeof(ifmgd->vht_capa_mask)); + if (req->ie && req->ie_len) { memcpy(assoc_data->ie, req->ie, req->ie_len); assoc_data->ie_len = req->ie_len; diff --combined net/openvswitch/datapath.c index 6980c3e,b7d0b7c..78a1195 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@@ -44,6 -44,7 +44,7 @@@ #include <linux/netfilter_ipv4.h> #include <linux/inetdevice.h> #include <linux/list.h> + #include <linux/lockdep.h> #include <linux/openvswitch.h> #include <linux/rculist.h> #include <linux/dmi.h> @@@ -56,38 -57,59 +57,59 @@@ #include "flow.h" #include "vport-internal_dev.h"
- /** - * struct ovs_net - Per net-namespace data for ovs. - * @dps: List of datapaths to enable dumping them all out. - * Protected by genl_mutex. - */ - struct ovs_net { - struct list_head dps; - }; - - static int ovs_net_id __read_mostly;
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ) static void rehash_flow_table(struct work_struct *work); static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+ int ovs_net_id __read_mostly; + + static void ovs_notify(struct sk_buff *skb, struct genl_info *info, + struct genl_multicast_group *grp) + { + genl_notify(skb, genl_info_net(info), info->snd_portid, + grp->id, info->nlhdr, GFP_KERNEL); + } + /** * DOC: Locking: * - * Writes to device state (add/remove datapath, port, set operations on vports, - * etc.) are protected by RTNL. - * - * Writes to other state (flow table modifications, set miscellaneous datapath - * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside - * genl_mutex. + * All writes, whether to device state (add/remove datapath, port, set + * operations on vports, etc.) or to other state (flow table modifications, + * miscellaneous datapath parameters, etc.), are protected by ovs_lock. * * Reads are protected by RCU. * * There are a few special cases (mostly stats) that have their own * synchronization but they nest under all of above and don't interact with * each other. + * + * The RTNL lock nests inside ovs_mutex. */
+ static DEFINE_MUTEX(ovs_mutex); + + void ovs_lock(void) + { + mutex_lock(&ovs_mutex); + } + + void ovs_unlock(void) + { + mutex_unlock(&ovs_mutex); + } + + #ifdef CONFIG_LOCKDEP + int lockdep_ovsl_is_held(void) + { + if (debug_locks) + return lockdep_is_held(&ovs_mutex); + else + return 1; + } + #endif + static struct vport *new_vport(const struct vport_parms *); static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *, const struct dp_upcall_info *); @@@ -95,7 -117,7 +117,7 @@@ static int queue_userspace_packet(struc struct sk_buff *, const struct dp_upcall_info *);
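lockdep_ovsl_is_held() above backs a pair of helpers in datapath.h that this patch substitutes for the old genl_dereference()/ASSERT_RTNL() pattern; approximately:

/* Assert ovs_mutex is held; dereference RCU pointers whose updates are
 * serialized by ovs_mutex (paraphrased from datapath.h).
 */
#define ASSERT_OVSL()		WARN_ON(unlikely(!lockdep_ovsl_is_held()))
#define ovsl_dereference(p) \
	rcu_dereference_protected(p, lockdep_ovsl_is_held())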
- /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */ + /* Must be called with rcu_read_lock or ovs_mutex. */ static struct datapath *get_dp(struct net *net, int dp_ifindex) { struct datapath *dp = NULL; @@@ -113,10 -135,10 +135,10 @@@ return dp; }
- /* Must be called with rcu_read_lock or RTNL lock. */ + /* Must be called with rcu_read_lock or ovs_mutex. */ const char *ovs_dp_name(const struct datapath *dp) { - struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL); + struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL); return vport->ops->get_name(vport); }
@@@ -168,7 -190,7 +190,7 @@@ struct vport *ovs_lookup_vport(const st return NULL; }
- /* Called with RTNL lock and genl_lock. */ + /* Called with ovs_mutex. */ static struct vport *new_vport(const struct vport_parms *parms) { struct vport *vport; @@@ -180,14 -202,12 +202,12 @@@
hlist_add_head_rcu(&vport->dp_hash_node, head); } return vport; }
- /* Called with RTNL lock. */ void ovs_dp_detach_port(struct vport *p) { - ASSERT_RTNL(); + ASSERT_OVSL();
/* First drop references to device. */ hlist_del_rcu(&p->dp_hash_node); @@@ -337,6 -357,35 +357,35 @@@ static int queue_gso_packets(struct ne return err; }
+ static size_t key_attr_size(void) + { + return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ + + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */ + + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ + + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ + + nla_total_size(28); /* OVS_KEY_ATTR_ND */ + } + + static size_t upcall_msg_size(const struct sk_buff *skb, + const struct nlattr *userdata) + { + size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) + + nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */ + + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */ + + /* OVS_PACKET_ATTR_USERDATA */ + if (userdata) + size += NLA_ALIGN(userdata->nla_len); + + return size; + } + static int queue_userspace_packet(struct net *net, int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) @@@ -345,7 -394,6 +394,6 @@@ struct sk_buff *nskb = NULL; struct sk_buff *user_skb; /* to be queued to userspace */ struct nlattr *nla; - unsigned int len; int err;
if (vlan_tx_tag_present(skb)) { @@@ -366,13 -414,7 +414,7 @@@ goto out; }
- len = sizeof(struct ovs_header); - len += nla_total_size(skb->len); - len += nla_total_size(FLOW_BUFSIZE); - if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) - len += nla_total_size(8); - - user_skb = genlmsg_new(len, GFP_ATOMIC); + user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; goto out; @@@ -387,8 -429,9 +429,9 @@@ nla_nest_end(user_skb, nla);
if (upcall_info->userdata) - nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, - nla_get_u64(upcall_info->userdata)); + __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA, + nla_len(upcall_info->userdata), + nla_data(upcall_info->userdata));
nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
@@@ -402,13 -445,13 +445,13 @@@ out return err; }
- /* Called with genl_mutex. */ + /* Called with ovs_mutex. */ static int flush_flows(struct datapath *dp) { struct flow_table *old_table; struct flow_table *new_table;
- old_table = genl_dereference(dp->table); + old_table = ovsl_dereference(dp->table); new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); if (!new_table) return -ENOMEM; @@@ -544,7 -587,7 +587,7 @@@ static int validate_userspace(const str { static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, - [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 }, + [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; @@@ -661,8 -704,7 +704,7 @@@ static int ovs_packet_cmd_execute(struc
err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || - !a[OVS_PACKET_ATTR_ACTIONS] || - nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN) + !a[OVS_PACKET_ATTR_ACTIONS]) goto err;
len = nla_len(a[OVS_PACKET_ATTR_PACKET]); @@@ -672,7 -714,7 +714,7 @@@ goto err; skb_reserve(packet, NET_IP_ALIGN);
- memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len); + nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
skb_reset_mac_header(packet); eth = eth_hdr(packet); @@@ -680,7 -722,7 +722,7 @@@ /* Normally, setting the skb 'protocol' field would be handled by a * call to eth_type_trans(), but it assumes there's a sending * device, which we may not have. */ - if (ntohs(eth->h_proto) >= 1536) + if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) packet->protocol = eth->h_proto; else packet->protocol = htons(ETH_P_802_2); @@@ -743,7 -785,7 +785,7 @@@ err }
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { - [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC }, + [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, }; @@@ -759,7 -801,7 +801,7 @@@ static struct genl_ops dp_packet_genl_o static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) { int i; - struct flow_table *table = genl_dereference(dp->table); + struct flow_table *table = ovsl_dereference(dp->table);
stats->n_flows = ovs_flow_tbl_count(table);
@@@ -801,7 -843,17 +843,17 @@@ static struct genl_multicast_group ovs_ .name = OVS_FLOW_MCGROUP };
- /* Called with genl_lock. */ + static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) + { + return NLMSG_ALIGN(sizeof(struct ovs_header)) + + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */ + + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ + + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ + + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */ + } + + /* Called with ovs_mutex. */ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, struct sk_buff *skb, u32 portid, u32 seq, u32 flags, u8 cmd) @@@ -815,8 -867,7 +867,7 @@@ u8 tcp_flags; int err;
+/* Called with ovs_mutex. */
 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 				  struct sk_buff *skb, u32 portid,
 				  u32 seq, u32 flags, u8 cmd)

@@@ -815,8 -867,7 +867,7 @@@
 	u8 tcp_flags;
 	int err;

-	sf_acts = rcu_dereference_protected(flow->sf_acts,
-					    lockdep_genl_is_held());
+	sf_acts = ovsl_dereference(flow->sf_acts);

 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
 				 flags, cmd);
 	if (!ovs_header)

@@@ -879,25 -930,10 +930,10 @@@ error
 static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
 {
 	const struct sw_flow_actions *sf_acts;
-	int len;

-	sf_acts = rcu_dereference_protected(flow->sf_acts,
-					    lockdep_genl_is_held());
+	sf_acts = ovsl_dereference(flow->sf_acts);

-	/* OVS_FLOW_ATTR_KEY */
-	len = nla_total_size(FLOW_BUFSIZE);
-	/* OVS_FLOW_ATTR_ACTIONS */
-	len += nla_total_size(sf_acts->actions_len);
-	/* OVS_FLOW_ATTR_STATS */
-	len += nla_total_size(sizeof(struct ovs_flow_stats));
-	/* OVS_FLOW_ATTR_TCP_FLAGS */
-	len += nla_total_size(1);
-	/* OVS_FLOW_ATTR_USED */
-	len += nla_total_size(8);
-
-	len += NLMSG_ALIGN(sizeof(struct ovs_header));
-
-	return genlmsg_new(len, GFP_KERNEL);
+	return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
 }

 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,

@@@ -946,12 -982,13 +982,13 @@@ static int ovs_flow_cmd_new_or_set(stru
 		goto error;
 	}

+	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	error = -ENODEV;
 	if (!dp)
-		goto error;
+		goto err_unlock_ovs;

-	table = genl_dereference(dp->table);
+	table = ovsl_dereference(dp->table);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
 	if (!flow) {
 		struct sw_flow_actions *acts;

@@@ -959,7 -996,7 +996,7 @@@
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
-			goto error;
+			goto err_unlock_ovs;

 		/* Expand table, if necessary, to make room. */
 		if (ovs_flow_tbl_need_to_expand(table)) {

@@@ -969,7 -1006,7 +1006,7 @@@
 			if (!IS_ERR(new_table)) {
 				rcu_assign_pointer(dp->table, new_table);
 				ovs_flow_tbl_deferred_destroy(table);
-				table = genl_dereference(dp->table);
+				table = ovsl_dereference(dp->table);
 			}
 		}

@@@ -977,7 -1014,7 +1014,7 @@@
 		flow = ovs_flow_alloc();
 		if (IS_ERR(flow)) {
 			error = PTR_ERR(flow);
-			goto error;
+			goto err_unlock_ovs;
 		}
 		flow->key = key;
 		clear_stats(flow);

@@@ -1010,11 -1047,10 +1047,10 @@@
 		error = -EEXIST;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
 		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
-			goto error;
+			goto err_unlock_ovs;

 		/* Update actions. */
-		old_acts = rcu_dereference_protected(flow->sf_acts,
-						     lockdep_genl_is_held());
+		old_acts = ovsl_dereference(flow->sf_acts);
 		acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
 		if (acts_attrs &&
 		    (old_acts->actions_len != nla_len(acts_attrs) ||

@@@ -1025,7 -1061,7 +1061,7 @@@
 			new_acts = ovs_flow_actions_alloc(acts_attrs);
 			error = PTR_ERR(new_acts);
 			if (IS_ERR(new_acts))
-				goto error;
+				goto err_unlock_ovs;

 			rcu_assign_pointer(flow->sf_acts, new_acts);
 			ovs_flow_deferred_free_acts(old_acts);

@@@ -1041,11 -1077,10 +1077,10 @@@
 			spin_unlock_bh(&flow->lock);
 		}
 	}
+	ovs_unlock();

 	if (!IS_ERR(reply))
-		genl_notify(reply, genl_info_net(info), info->snd_portid,
-			    ovs_dp_flow_multicast_group.id, info->nlhdr,
-			    GFP_KERNEL);
+		ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
 	else
 		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
 				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));

@@@ -1053,6 -1088,8 +1088,8 @@@
 error_free_flow:
 	ovs_flow_free(flow);
+err_unlock_ovs:
+	ovs_unlock();
 error:
 	return error;
 }
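Editor's note: every operation in this conversion follows the same shape: take ovs_mutex up front, funnel all failures through a label that releases it, and never return with the lock held. Distilled below; the command handler and its body are hypothetical, while ovs_lock()/ovs_unlock() and get_dp() come from the patch:

static int demo_cmd(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	int err;

	ovs_lock();

	dp = get_dp(sock_net(skb->sk), 0 /* dp_ifindex, sketch only */);
	err = -ENODEV;
	if (!dp)
		goto err_unlock_ovs;

	/* ... real work under ovs_mutex ... */
	err = 0;

err_unlock_ovs:
	ovs_unlock();
	return err;
}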
@@@ -1075,21 -1112,32 +1112,32 @@@ static int ovs_flow_cmd_get(struct sk_b
 	if (err)
 		return err;

+	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
+	if (!dp) {
+		err = -ENODEV;
+		goto unlock;
+	}

-	table = genl_dereference(dp->table);
+	table = ovsl_dereference(dp->table);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
-	if (!flow)
-		return -ENOENT;
+	if (!flow) {
+		err = -ENOENT;
+		goto unlock;
+	}

 	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 					info->snd_seq, OVS_FLOW_CMD_NEW);
-	if (IS_ERR(reply))
-		return PTR_ERR(reply);
+	if (IS_ERR(reply)) {
+		err = PTR_ERR(reply);
+		goto unlock;
+	}

+	ovs_unlock();
 	return genlmsg_reply(reply, info);
+unlock:
+	ovs_unlock();
+	return err;
 }

 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)

@@@ -1104,25 -1152,33 +1152,33 @@@
 	int err;
 	int key_len;

+	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
-
-	if (!a[OVS_FLOW_ATTR_KEY])
-		return flush_flows(dp);
+	if (!dp) {
+		err = -ENODEV;
+		goto unlock;
+	}

+	if (!a[OVS_FLOW_ATTR_KEY]) {
+		err = flush_flows(dp);
+		goto unlock;
+	}
 	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
 	if (err)
-		return err;
+		goto unlock;

-	table = genl_dereference(dp->table);
+	table = ovsl_dereference(dp->table);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
-	if (!flow)
-		return -ENOENT;
+	if (!flow) {
+		err = -ENOENT;
+		goto unlock;
+	}

 	reply = ovs_flow_cmd_alloc_info(flow);
-	if (!reply)
-		return -ENOMEM;
+	if (!reply) {
+		err = -ENOMEM;
+		goto unlock;
+	}

 	ovs_flow_tbl_remove(table, flow);

@@@ -1131,10 -1187,13 +1187,13 @@@
 	BUG_ON(err < 0);

 	ovs_flow_deferred_free(flow);
+	ovs_unlock();
-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+	ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
 	return 0;
+unlock:
+	ovs_unlock();
+	return err;
 }
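Editor's note: ovs_notify() itself is introduced elsewhere in the patch and is not shown in these hunks. Judging by the call sites, it is presumably a thin wrapper that captures the genl_notify() boilerplate repeated at every notification point; a hedged reconstruction, not taken from this diff:

static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
		       struct genl_multicast_group *grp)
{
	/* One multicast-notify call instead of four arguments repeated
	 * at every call site. */
	genl_notify(skb, genl_info_net(info), info->snd_portid,
		    grp->id, info->nlhdr, GFP_KERNEL);
}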
 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)

@@@ -1143,11 -1202,14 +1202,14 @@@
 	struct datapath *dp;
 	struct flow_table *table;

+	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
-	if (!dp)
+	if (!dp) {
+		ovs_unlock();
 		return -ENODEV;
+	}

-	table = genl_dereference(dp->table);
+	table = ovsl_dereference(dp->table);

 	for (;;) {
 		struct sw_flow *flow;

@@@ -1168,6 -1230,7 +1230,7 @@@
 		cb->args[0] = bucket;
 		cb->args[1] = obj;
 	}
+	ovs_unlock();
 	return skb->len;
 }

@@@ -1213,6 -1276,16 +1276,16 @@@ static struct genl_multicast_group ovs_
 	.name = OVS_DATAPATH_MCGROUP
 };

+static size_t ovs_dp_cmd_msg_size(void)
+{
+	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
+
+	msgsize += nla_total_size(IFNAMSIZ);
+	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
+
+	return msgsize;
+}
+
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 				u32 portid, u32 seq, u32 flags, u8 cmd)
 {

@@@ -1251,7 -1324,7 +1324,7 @@@ static struct sk_buff *ovs_dp_cmd_build
 	struct sk_buff *skb;
 	int retval;

-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);

@@@ -1263,7 -1336,7 +1336,7 @@@
 	return skb;
 }

-/* Called with genl_mutex and optionally with RTNL lock also. */
+/* Called with ovs_mutex. */
 static struct datapath *lookup_datapath(struct net *net,
 					struct ovs_header *ovs_header,
 					struct nlattr *a[OVS_DP_ATTR_MAX + 1])

@@@ -1297,12 -1370,12 +1370,12 @@@ static int ovs_dp_cmd_new(struct sk_buf
 	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
 		goto err;

-	rtnl_lock();
+	ovs_lock();

 	err = -ENOMEM;
 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
 	if (dp == NULL)
-		goto err_unlock_rtnl;
+		goto err_unlock_ovs;

 	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

@@@ -1353,37 -1426,34 +1426,34 @@@

 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
 	list_add_tail(&dp->list_node, &ovs_net->dps);
-	rtnl_unlock();

-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
-		    GFP_KERNEL);
+	ovs_unlock();
+
+	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
 	return 0;

 err_destroy_local_port:
-	ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
+	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 err_destroy_ports_array:
 	kfree(dp->ports);
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(genl_dereference(dp->table));
+	ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
-err_unlock_rtnl:
-	rtnl_unlock();
+err_unlock_ovs:
+	ovs_unlock();
 err:
 	return err;
 }

-/* Called with genl_mutex. */
+/* Called with ovs_mutex. */
 static void __dp_destroy(struct datapath *dp)
 {
 	int i;

-	rtnl_lock();
-
 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
 		struct hlist_node *n;

@@@ -1394,14 -1464,11 +1464,11 @@@
 	}

 	list_del(&dp->list_node);
-	ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
-	/* rtnl_unlock() will wait until all the references to devices that
-	 * are pending unregistration have been dropped. We do it here to
-	 * ensure that any internal devices (which contain DP pointers) are
-	 * fully destroyed before freeing the datapath.
+	/* OVSP_LOCAL is the datapath's internal port. Make sure all ports
+	 * in the datapath are destroyed before the datapath itself is freed.
 	 */
-	rtnl_unlock();
+	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 }

@@@ -1412,24 -1479,27 +1479,27 @@@ static int ovs_dp_cmd_del(struct sk_buf
 	struct datapath *dp;
 	int err;

+	ovs_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	err = PTR_ERR(dp);
 	if (IS_ERR(dp))
-		return err;
+		goto unlock;

 	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
 				      info->snd_seq, OVS_DP_CMD_DEL);
 	err = PTR_ERR(reply);
 	if (IS_ERR(reply))
-		return err;
+		goto unlock;

 	__dp_destroy(dp);
+	ovs_unlock();

-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
-		    GFP_KERNEL);
+	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);

 	return 0;
+unlock:
+	ovs_unlock();
+	return err;
 }

 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)

@@@ -1438,9 -1508,11 +1508,11 @@@
 	struct datapath *dp;
 	int err;

+	ovs_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+	err = PTR_ERR(dp);
 	if (IS_ERR(dp))
-		return PTR_ERR(dp);
+		goto unlock;

 	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
 				      info->snd_seq, OVS_DP_CMD_NEW);

@@@ -1448,31 -1520,45 +1520,45 @@@
 		err = PTR_ERR(reply);
 		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
 				ovs_dp_datapath_multicast_group.id, err);
-		return 0;
+		err = 0;
+		goto unlock;
 	}

-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
-		    GFP_KERNEL);
+	ovs_unlock();
+	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);

 	return 0;
+unlock:
+	ovs_unlock();
+	return err;
 }
 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 {
 	struct sk_buff *reply;
 	struct datapath *dp;
+	int err;

+	ovs_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
-	if (IS_ERR(dp))
-		return PTR_ERR(dp);
+	if (IS_ERR(dp)) {
+		err = PTR_ERR(dp);
+		goto unlock;
+	}

 	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
 				      info->snd_seq, OVS_DP_CMD_NEW);
-	if (IS_ERR(reply))
-		return PTR_ERR(reply);
+	if (IS_ERR(reply)) {
+		err = PTR_ERR(reply);
+		goto unlock;
+	}

+	ovs_unlock();
 	return genlmsg_reply(reply, info);
+
+unlock:
+	ovs_unlock();
+	return err;
 }

 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)

@@@ -1482,6 -1568,7 +1568,7 @@@
 	int skip = cb->args[0];
 	int i = 0;

+	ovs_lock();
 	list_for_each_entry(dp, &ovs_net->dps, list_node) {
 		if (i >= skip &&
 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,

@@@ -1490,6 -1577,7 +1577,7 @@@
 			break;
 		i++;
 	}
+	ovs_unlock();

 	cb->args[0] = i;

@@@ -1542,7 -1630,7 +1630,7 @@@ struct genl_multicast_group ovs_dp_vpor
 	.name = OVS_VPORT_MCGROUP
 };

-/* Called with RTNL lock or RCU read lock. */
+/* Called with ovs_mutex or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 				   u32 portid, u32 seq, u32 flags, u8 cmd)
 {

@@@ -1581,7 -1669,7 +1669,7 @@@ error
 	return err;
 }

-/* Called with RTNL lock or RCU read lock. */
+/* Called with ovs_mutex or RCU read lock. */
 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
 					 u32 seq, u8 cmd)
 {

@@@ -1593,12 -1681,14 +1681,12 @@@
 		return ERR_PTR(-ENOMEM);

 	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
-	if (retval < 0) {
-		kfree_skb(skb);
-		return ERR_PTR(retval);
-	}
+	BUG_ON(retval < 0);
+
 	return skb;
 }

-/* Called with RTNL lock or RCU read lock. */
+/* Called with ovs_mutex or RCU read lock. */
 static struct vport *lookup_vport(struct net *net,
 				  struct ovs_header *ovs_header,
 				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])

@@@ -1624,9 -1714,9 +1712,9 @@@
 		if (!dp)
 			return ERR_PTR(-ENODEV);

-		vport = ovs_vport_rtnl_rcu(dp, port_no);
+		vport = ovs_vport_ovsl_rcu(dp, port_no);
 		if (!vport)
-			return ERR_PTR(-ENOENT);
+			return ERR_PTR(-ENODEV);
 		return vport;
 	} else
 		return ERR_PTR(-EINVAL);

@@@ -1648,7 -1738,7 +1736,7 @@@ static int ovs_vport_cmd_new(struct sk_
 	    !a[OVS_VPORT_ATTR_UPCALL_PID])
 		goto exit;

-	rtnl_lock();
+	ovs_lock();
 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)

@@@ -1661,7 -1751,7 +1749,7 @@@
 	if (port_no >= DP_MAX_PORTS)
 		goto exit_unlock;

-		vport = ovs_vport_rtnl_rcu(dp, port_no);
+		vport = ovs_vport_ovsl(dp, port_no);
 		err = -EBUSY;
 		if (vport)
 			goto exit_unlock;

@@@ -1671,7 -1761,7 +1759,7 @@@
 			err = -EFBIG;
 			goto exit_unlock;
 		}
-		vport = ovs_vport_rtnl(dp, port_no);
+		vport = ovs_vport_ovsl(dp, port_no);
 		if (!vport)
 			break;
 	}

@@@ -1697,11 -1787,11 +1785,11 @@@
 		ovs_dp_detach_port(vport);
 		goto exit_unlock;
 	}
-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

 exit_unlock:
-	rtnl_unlock();
+	ovs_unlock();
 exit:
 	return err;
 }

@@@ -1713,7 -1803,7 +1801,7 @@@ static int ovs_vport_cmd_set(struct sk_
 	struct vport *vport;
 	int err;
-	rtnl_lock();
+	ovs_lock();
 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))

@@@ -1724,34 -1814,27 +1812,35 @@@
 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
 		err = -EINVAL;

+	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!reply) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
 	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
 	if (err)
-		goto exit_unlock;
+		goto exit_free;
+
 	if (a[OVS_VPORT_ATTR_UPCALL_PID])
 		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-					 OVS_VPORT_CMD_NEW);
-	if (IS_ERR(reply)) {
-		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
-		goto exit_unlock;
-	}
+	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+	BUG_ON(err < 0);

-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+	ovs_unlock();
+	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
+	return 0;
+	rtnl_unlock();
+	return 0;
+
+exit_free:
+	kfree_skb(reply);
 exit_unlock:
-	rtnl_unlock();
+	ovs_unlock();
 	return err;
 }

@@@ -1762,7 -1845,7 +1851,7 @@@ static int ovs_vport_cmd_del(struct sk_
 	struct vport *vport;
 	int err;

-	rtnl_lock();
+	ovs_lock();
 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))

@@@ -1782,11 -1865,10 +1871,10 @@@
 	err = 0;
 	ovs_dp_detach_port(vport);

-	genl_notify(reply, genl_info_net(info), info->snd_portid,
-		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

 exit_unlock:
-	rtnl_unlock();
+	ovs_unlock();
 	return err;
 }

@@@ -1946,13 -2028,13 +2034,13 @@@ static void rehash_flow_table(struct wo
 	struct datapath *dp;
 	struct net *net;

-	genl_lock();
+	ovs_lock();
 	rtnl_lock();
 	for_each_net(net) {
 		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

 		list_for_each_entry(dp, &ovs_net->dps, list_node) {
-			struct flow_table *old_table = genl_dereference(dp->table);
+			struct flow_table *old_table = ovsl_dereference(dp->table);
 			struct flow_table *new_table;
 			new_table = ovs_flow_tbl_rehash(old_table);

@@@ -1963,8 -2045,7 +2051,7 @@@
 		}
 	}
 	rtnl_unlock();
-	genl_unlock();
-
+	ovs_unlock();
 	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
 }
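Editor's note: rehash_flow_table() is a self-rearming delayed work item, and the conversion also fixes the lock order (ovs_mutex is now taken outside rtnl_lock). The rearm pattern in isolation, with a hypothetical work function; rehash_flow_wq and REHASH_FLOW_INTERVAL come from datapath.c:

#include <linux/workqueue.h>

static void demo_periodic_work(struct work_struct *work)
{
	ovs_lock();
	/* ... periodic maintenance under ovs_mutex ... */
	ovs_unlock();

	/* Rearm: the work item reschedules itself before returning. */
	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}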
@@@ -1973,18 -2054,21 +2060,21 @@@ static int __net_init ovs_init_net(stru
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

 	INIT_LIST_HEAD(&ovs_net->dps);
+	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
 	return 0;
 }

 static void __net_exit ovs_exit_net(struct net *net)
 {
-	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 	struct datapath *dp, *dp_next;
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
-	genl_lock();
+	ovs_lock();
 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
 		__dp_destroy(dp);
-	genl_unlock();
+	ovs_unlock();
+
+	cancel_work_sync(&ovs_net->dp_notify_work);
 }
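Editor's note: the ordering in ovs_exit_net() matters: all datapaths are destroyed under ovs_mutex first, and only then does cancel_work_sync() wait out any in-flight dp_notify_work, so the work item cannot run against freed per-netns state. A sketch of that teardown contract, with a hypothetical helper:

#include <linux/workqueue.h>

/* Hypothetical teardown helper: cancel_work_sync() both cancels a
 * pending work item and, if it is already executing, blocks until it
 * finishes, so after this call the work body can no longer touch
 * 'ovs_net'. */
static void demo_teardown(struct ovs_net *ovs_net)
{
	cancel_work_sync(&ovs_net->dp_notify_work);
	/* safe to free or reuse ovs_net from here on */
}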
 static struct pernet_operations ovs_net_ops = {

diff --combined net/openvswitch/flow.c
index 67a2b78,cf9328b..b15321a
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@@ -211,7 -211,7 +211,7 @@@ struct sw_flow_actions *ovs_flow_action
 		return ERR_PTR(-ENOMEM);

 	sfa->actions_len = actions_len;
-	memcpy(sfa->actions, nla_data(actions), actions_len);
+	nla_memcpy(sfa->actions, actions, actions_len);
 	return sfa;
 }

@@@ -466,7 -466,7 +466,7 @@@ static __be16 parse_ethertype(struct sk
 	proto = *(__be16 *) skb->data;
 	__skb_pull(skb, sizeof(__be16));

-	if (ntohs(proto) >= 1536)
+	if (ntohs(proto) >= ETH_P_802_3_MIN)
 		return proto;

 	if (skb->len < sizeof(struct llc_snap_hdr))

@@@ -483,7 -483,7 +483,7 @@@

 	__skb_pull(skb, sizeof(struct llc_snap_hdr));

-	if (ntohs(llc->ethertype) >= 1536)
+	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
 		return llc->ethertype;

 	return htons(ETH_P_802_2);

@@@ -795,9 -795,9 +795,9 @@@ void ovs_flow_tbl_insert(struct flow_ta
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
+	BUG_ON(table->count == 0);
 	hlist_del_rcu(&flow->hash_node[table->node_ver]);
 	table->count--;
-	BUG_ON(table->count < 0);
 }
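Editor's note: moving the BUG_ON() above the decrement is not cosmetic. If table->count is an unsigned type, "count < 0" after the decrement can never fire, while "count == 0" before it catches exactly the underflow the assertion was meant to guard. The general shape, as a hypothetical helper:

/* Assert before mutating: check the precondition that would be
 * violated, not a postcondition that unsigned arithmetic can mask. */
static void demo_remove_one(unsigned int *count)
{
	BUG_ON(*count == 0);	/* would wrap to UINT_MAX below */
	(*count)--;
}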
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */

@@@ -1038,7 -1038,7 +1038,7 @@@ int ovs_flow_from_nlattrs(struct sw_flo

 	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
 		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-		if (ntohs(swkey->eth.type) < 1536)
+		if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN)
 			return -EINVAL;
 		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
 	} else {

diff --combined net/unix/af_unix.c
index 2db702d,5ca1631..9efe011
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@@ -1340,7 -1340,6 +1340,6 @@@ static void unix_destruct_scm(struct sk
 	struct scm_cookie scm;
 	memset(&scm, 0, sizeof(scm));
 	scm.pid = UNIXCB(skb).pid;
-	scm.cred = UNIXCB(skb).cred;
 	if (UNIXCB(skb).fp)
 		unix_detach_fds(&scm, skb);

@@@ -1391,8 -1390,8 +1390,8 @@@ static int unix_scm_to_skb(struct scm_c
 	int err = 0;
 	UNIXCB(skb).pid = get_pid(scm->pid);
-	if (scm->cred)
-		UNIXCB(skb).cred = get_cred(scm->cred);
+	UNIXCB(skb).uid = scm->creds.uid;
+	UNIXCB(skb).gid = scm->creds.gid;
 	UNIXCB(skb).fp = NULL;
 	if (scm->fp && send_fds)
 		err = unix_attach_fds(scm, skb);

@@@ -1409,13 -1408,13 +1408,13 @@@
 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
 			    const struct sock *other)
 {
-	if (UNIXCB(skb).cred)
+	if (UNIXCB(skb).pid)
 		return;
 	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
 	    !other->sk_socket ||
 	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
 		UNIXCB(skb).pid = get_pid(task_tgid(current));
-		UNIXCB(skb).cred = get_current_cred();
- 		current_euid_egid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
++		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
 	}
 }
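Editor's note: this is part of the conversion away from pinning a full struct cred per skb: only the sender's kuid_t/kgid_t are stored, and UNIXCB(skb).pid doubles as the "credentials present" marker. Note the merge resolution (the "++" line) picks current_uid_gid() over one parent's current_euid_egid(). A minimal sketch of the capture, with a hypothetical struct mirroring the UNIXCB fields:

#include <linux/cred.h>
#include <linux/uidgid.h>

struct demo_skb_creds {		/* hypothetical */
	kuid_t uid;
	kgid_t gid;
};

static void demo_capture(struct demo_skb_creds *c)
{
	/* Record the sending task's real uid/gid as namespace-aware
	 * kuid_t/kgid_t values; no reference counting needed. */
	current_uid_gid(&c->uid, &c->gid);
}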
@@@ -1819,7 -1818,7 +1818,7 @@@ static int unix_dgram_recvmsg(struct ki
 		siocb->scm = &tmp_scm;
 		memset(&tmp_scm, 0, sizeof(tmp_scm));
 	}
-	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
 	unix_set_secdata(siocb->scm, skb);

 	if (!(flags & MSG_PEEK)) {

@@@ -1991,11 -1990,12 +1990,12 @@@ again
 		if (check_creds) {
 			/* Never glue messages from different writers */
 			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
-			    (UNIXCB(skb).cred != siocb->scm->cred))
+			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
+			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
 				break;
 		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
 			/* Copy credentials */
-			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
 			check_creds = 1;
 		}

@@@ -2196,7 -2196,9 +2196,9 @@@ static unsigned int unix_dgram_poll(str
 	/* exceptional events? */
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR;
+		mask |= POLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
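Editor's note: with SOCK_SELECT_ERR_QUEUE set, an occupied error queue now raises POLLPRI in addition to POLLERR, so callers can select() or poll() for error-queue data (for example, TX timestamps) explicitly. The mask construction in isolation, as a hypothetical helper:

#include <linux/poll.h>
#include <net/sock.h>

static unsigned int demo_errqueue_mask(const struct sock *sk)
{
	unsigned int mask = 0;

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
	return mask;
}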
diff --combined security/selinux/hooks.c
index 7171a95,0a0609f..bf889ee
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@@ -51,7 -51,6 +51,7 @@@
 #include <linux/tty.h>
 #include <net/icmp.h>
 #include <net/ip.h>		/* for local_port_range[] */
+#include <net/sock.h>
 #include <net/tcp.h>		/* struct or_callable used in sock_rcv_skb */
 #include <net/net_namespace.h>
 #include <net/netlabel.h>

@@@ -61,7 -60,7 +61,7 @@@
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>	/* for network interface checks */
-#include <linux/netlink.h>
+#include <net/netlink.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/dccp.h>

@@@ -4364,11 -4363,6 +4364,11 @@@ static void selinux_inet_conn_establish
 	selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
 }

+static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+	skb_set_owner_w(skb, sk);
+}
+
 static int selinux_secmark_relabel_packet(u32 sid)
 {
 	const struct task_security_struct *__tsec;

@@@ -4481,7 -4475,7 +4481,7 @@@ static int selinux_nlmsg_perm(struct so
 	struct nlmsghdr *nlh;
 	struct sk_security_struct *sksec = sk->sk_security;
-	if (skb->len < NLMSG_SPACE(0)) {
+	if (skb->len < NLMSG_HDRLEN) {
 		err = -EINVAL;
 		goto out;
 	}
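Editor's note: NLMSG_SPACE(0) and NLMSG_HDRLEN both evaluate to the aligned size of struct nlmsghdr, so this hunk is a no-op in behavior; it just prefers the modern constant, matching the <net/netlink.h> include swapped in above. The check in isolation, as a hypothetical helper:

#include <net/netlink.h>

/* A netlink message must contain at least a complete nlmsghdr before
 * nlmsg_hdr(skb) may be dereferenced. */
static bool demo_nlmsg_complete(const struct sk_buff *skb)
{
	return skb->len >= NLMSG_HDRLEN;
}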
@@@ -5670,7 -5664,6 +5670,7 @@@ static struct security_operations selin
 	.tun_dev_attach_queue =		selinux_tun_dev_attach_queue,
 	.tun_dev_attach =		selinux_tun_dev_attach,
 	.tun_dev_open =			selinux_tun_dev_open,
+	.skb_owned_by =			selinux_skb_owned_by,

 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	.xfrm_policy_alloc_security =	selinux_xfrm_policy_alloc,