[linux-next] LinuxNextTracking branch, master, updated. next-20130202

batman at open-mesh.org batman at open-mesh.org
Sun Feb 3 00:21:42 CET 2013


The following commit has been merged in the master branch:
commit f1e7b73acc26e8908af783bcd3a9900fd80688f5
Merge: 218774dc341f219bfcf940304a081b121a0e8099 fc16e884a2320198b8cb7bc2fdcf6b4485e79709
Author: David S. Miller <davem at davemloft.net>
Date:   Tue Jan 29 15:32:13 2013 -0500

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    Bring in the 'net' tree so that we can get some ipv4/ipv6 bug
    fixes that some net-next work will build upon.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>

diff --combined MAINTAINERS
index a7aadea,76ae4aa..b5ab4d9
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1972,9 -1972,9 +1972,9 @@@ S:	Maintaine
  F:	drivers/usb/host/ohci-ep93xx.c
  
  CIRRUS LOGIC CS4270 SOUND DRIVER
- M:	Timur Tabi <timur at freescale.com>
+ M:	Timur Tabi <timur at tabi.org>
  L:	alsa-devel at alsa-project.org (moderated for non-subscribers)
- S:	Supported
+ S:	Odd Fixes
  F:	sound/soc/codecs/cs4270*
  
  CLEANCACHE API
@@@ -2966,7 -2966,7 +2966,7 @@@ S:	Maintaine
  F:	drivers/net/ethernet/i825xx/eexpress.*
  
  ETHERNET BRIDGE
- M:	Stephen Hemminger <shemminger at vyatta.com>
+ M:	Stephen Hemminger <stephen at networkplumber.org>
  L:	bridge at lists.linux-foundation.org
  L:	netdev at vger.kernel.org
  W:	http://www.linuxfoundation.org/en/Net:Bridge
@@@ -2974,6 -2974,11 +2974,6 @@@ S:	Maintaine
  F:	include/linux/netfilter_bridge/
  F:	net/bridge/
  
 -ETHERTEAM 16I DRIVER
 -M:	Mika Kuoppala <miku at iki.fi>
 -S:	Maintained
 -F:	drivers/net/ethernet/fujitsu/eth16i.c
 -
  EXT2 FILE SYSTEM
  M:	Jan Kara <jack at suse.cz>
  L:	linux-ext4 at vger.kernel.org
@@@ -3186,9 -3191,9 +3186,9 @@@ F:	include/uapi/video
  F:	include/uapi/linux/fb.h
  
  FREESCALE DIU FRAMEBUFFER DRIVER
- M:	Timur Tabi <timur at freescale.com>
+ M:	Timur Tabi <timur at tabi.org>
  L:	linux-fbdev at vger.kernel.org
- S:	Supported
+ S:	Maintained
  F:	drivers/video/fsl-diu-fb.*
  
  FREESCALE DMA DRIVER
@@@ -3223,9 -3228,8 +3223,8 @@@ F:	drivers/net/ethernet/freescale/fs_en
  F:	include/linux/fs_enet_pd.h
  
  FREESCALE QUICC ENGINE LIBRARY
- M:	Timur Tabi <timur at freescale.com>
  L:	linuxppc-dev at lists.ozlabs.org
- S:	Supported
+ S:	Orphan
  F:	arch/powerpc/sysdev/qe_lib/
  F:	arch/powerpc/include/asm/*qe.h
  
@@@ -3244,16 -3248,16 +3243,16 @@@ S:	Maintaine
  F:	drivers/net/ethernet/freescale/ucc_geth*
  
  FREESCALE QUICC ENGINE UCC UART DRIVER
- M:	Timur Tabi <timur at freescale.com>
+ M:	Timur Tabi <timur at tabi.org>
  L:	linuxppc-dev at lists.ozlabs.org
- S:	Supported
+ S:	Maintained
  F:	drivers/tty/serial/ucc_uart.c
  
  FREESCALE SOC SOUND DRIVERS
- M:	Timur Tabi <timur at freescale.com>
+ M:	Timur Tabi <timur at tabi.org>
  L:	alsa-devel at alsa-project.org (moderated for non-subscribers)
  L:	linuxppc-dev at lists.ozlabs.org
- S:	Supported
+ S:	Maintained
  F:	sound/soc/fsl/fsl*
  F:	sound/soc/fsl/mpc8610_hpcd.c
  
@@@ -4901,7 -4905,7 +4900,7 @@@ S:	Maintaine
  
  MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
  M:	Mirko Lindner <mlindner at marvell.com>
- M:	Stephen Hemminger <shemminger at vyatta.com>
+ M:	Stephen Hemminger <stephen at networkplumber.org>
  L:	netdev at vger.kernel.org
  S:	Maintained
  F:	drivers/net/ethernet/marvell/sk*
@@@ -5176,7 -5180,7 +5175,7 @@@ S:	Supporte
  F:	drivers/infiniband/hw/nes/
  
  NETEM NETWORK EMULATOR
- M:	Stephen Hemminger <shemminger at vyatta.com>
+ M:	Stephen Hemminger <stephen at networkplumber.org>
  L:	netem at lists.linux-foundation.org
  S:	Maintained
  F:	net/sched/sch_netem.c
@@@ -5365,6 -5369,13 +5364,6 @@@ F:	include/linux/sunrpc
  F:	include/uapi/linux/nfs*
  F:	include/uapi/linux/sunrpc/
  
 -NI5010 NETWORK DRIVER
 -M:	Jan-Pascal van Best <janpascal at vanbest.org>
 -M:	Andreas Mohr <andi at lisas.de>
 -L:	netdev at vger.kernel.org
 -S:	Maintained
 -F:	drivers/net/ethernet/racal/ni5010.*
 -
  NILFS2 FILESYSTEM
  M:	KONISHI Ryusuke <konishi.ryusuke at lab.ntt.co.jp>
  L:	linux-nilfs at vger.kernel.org
@@@ -6574,7 -6585,7 +6573,7 @@@ F:	drivers/media/platform/s3c-camif
  F:	include/media/s3c_camif.h
  
  SERIAL DRIVERS
- M:	Alan Cox <alan at linux.intel.com>
+ M:	Greg Kroah-Hartman <gregkh at linuxfoundation.org>
  L:	linux-serial at vger.kernel.org
  S:	Maintained
  F:	drivers/tty/serial
diff --combined drivers/net/can/c_can/c_can.c
index 57eb1e7,58607f1..285f763
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@@ -39,7 -39,6 +39,7 @@@
  #include <linux/can.h>
  #include <linux/can/dev.h>
  #include <linux/can/error.h>
 +#include <linux/can/led.h>
  
  #include "c_can.h"
  
@@@ -478,8 -477,6 +478,8 @@@ static int c_can_read_msg_object(struc
  	stats->rx_packets++;
  	stats->rx_bytes += frame->can_dlc;
  
 +	can_led_event(dev, CAN_LED_EVENT_RX);
 +
  	return 0;
  }
  
@@@ -754,7 -751,6 +754,7 @@@ static void c_can_do_tx(struct net_devi
  					C_CAN_IFACE(MSGCTRL_REG, 0))
  					& IF_MCONT_DLC_MASK;
  			stats->tx_packets++;
 +			can_led_event(dev, CAN_LED_EVENT_TX);
  			c_can_inval_msg_object(dev, 0, msg_obj_no);
  		} else {
  			break;
@@@ -964,7 -960,7 +964,7 @@@ static int c_can_handle_bus_err(struct 
  		break;
  	case LEC_ACK_ERROR:
  		netdev_dbg(dev, "ack error\n");
- 		cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ 		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
  				CAN_ERR_PROT_LOC_ACK_DEL);
  		break;
  	case LEC_BIT1_ERROR:
@@@ -977,7 -973,7 +977,7 @@@
  		break;
  	case LEC_CRC_ERROR:
  		netdev_dbg(dev, "CRC error\n");
- 		cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ 		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
  				CAN_ERR_PROT_LOC_CRC_DEL);
  		break;
  	default:
@@@ -1119,8 -1115,6 +1119,8 @@@ static int c_can_open(struct net_devic
  
  	napi_enable(&priv->napi);
  
 +	can_led_event(dev, CAN_LED_EVENT_OPEN);
 +
  	/* start the c_can controller */
  	c_can_start(dev);
  
@@@ -1149,8 -1143,6 +1149,8 @@@ static int c_can_close(struct net_devic
  	c_can_reset_ram(priv, false);
  	c_can_pm_runtime_put_sync(priv);
  
 +	can_led_event(dev, CAN_LED_EVENT_STOP);
 +
  	return 0;
  }
  
@@@ -1276,8 -1268,6 +1276,8 @@@ int register_c_can_dev(struct net_devic
  	err = register_candev(dev);
  	if (err)
  		c_can_pm_runtime_disable(priv);
 +	else
 +		devm_can_led_init(dev);
  
  	return err;
  }
diff --combined drivers/net/can/ti_hecc.c
index f52a975,300581b..f21fc37
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@@ -50,7 -50,6 +50,7 @@@
  
  #include <linux/can/dev.h>
  #include <linux/can/error.h>
 +#include <linux/can/led.h>
  #include <linux/can/platform/ti_hecc.h>
  
  #define DRV_NAME "ti_hecc"
@@@ -594,7 -593,6 +594,7 @@@ static int ti_hecc_rx_pkt(struct ti_hec
  	spin_unlock_irqrestore(&priv->mbx_lock, flags);
  
  	stats->rx_bytes += cf->can_dlc;
 +	can_led_event(priv->ndev, CAN_LED_EVENT_RX);
  	netif_receive_skb(skb);
  	stats->rx_packets++;
  
@@@ -748,12 -746,12 +748,12 @@@ static int ti_hecc_error(struct net_dev
  		}
  		if (err_status & HECC_CANES_CRCE) {
  			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
- 			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ 			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
  					CAN_ERR_PROT_LOC_CRC_DEL;
  		}
  		if (err_status & HECC_CANES_ACKE) {
  			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
- 			cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ 			cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
  					CAN_ERR_PROT_LOC_ACK_DEL;
  		}
  	}
@@@ -798,7 -796,6 +798,7 @@@ static irqreturn_t ti_hecc_interrupt(in
  			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
  						HECC_CANMCF) & 0xF;
  			stats->tx_packets++;
 +			can_led_event(ndev, CAN_LED_EVENT_TX);
  			can_get_echo_skb(ndev, mbxno);
  			--priv->tx_tail;
  		}
@@@ -854,8 -851,6 +854,8 @@@ static int ti_hecc_open(struct net_devi
  		return err;
  	}
  
 +	can_led_event(ndev, CAN_LED_EVENT_OPEN);
 +
  	ti_hecc_start(ndev);
  	napi_enable(&priv->napi);
  	netif_start_queue(ndev);
@@@ -874,8 -869,6 +874,8 @@@ static int ti_hecc_close(struct net_dev
  	close_candev(ndev);
  	ti_hecc_transceiver_switch(priv, 0);
  
 +	can_led_event(ndev, CAN_LED_EVENT_STOP);
 +
  	return 0;
  }
  
@@@ -968,9 -961,6 +968,9 @@@ static int ti_hecc_probe(struct platfor
  		dev_err(&pdev->dev, "register_candev() failed\n");
  		goto probe_exit_clk;
  	}
 +
 +	devm_can_led_init(ndev);
 +
  	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
  		priv->base, (u32) ndev->irq);
  
diff --combined drivers/net/ethernet/calxeda/xgmac.c
index a345e24,f7f0290..a170065
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@@ -548,6 -548,10 +548,10 @@@ static int desc_get_rx_status(struct xg
  		return -1;
  	}
  
+ 	/* All frames should fit into a single buffer */
+ 	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ 		return -1;
+ 
  	/* Check if packet has checksum already */
  	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
  		!(ext_status & RXDESC_IP_PAYLOAD_MASK))
@@@ -1459,6 -1463,7 +1463,6 @@@ static int xgmac_set_mac_address(struc
  	if (!is_valid_ether_addr(addr->sa_data))
  		return -EADDRNOTAVAIL;
  
 -	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
  	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  
  	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 424f8ed,c306df7..c6c05bf
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@@ -1994,9 -1994,20 +1994,20 @@@ static int set_coalesce(struct net_devi
  {
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
- 
- 	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
- 			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+ 	struct sge_rspq *q;
+ 	int i;
+ 	int r = 0;
+ 
+ 	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
+ 		q = &adap->sge.ethrxq[i].rspq;
+ 		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
+ 			c->rx_max_coalesced_frames);
+ 		if (r) {
+ 			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
+ 			break;
+ 		}
+ 	}
+ 	return r;
  }
  
  static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@@ -4016,7 -4027,8 +4027,7 @@@ static int adap_init0_no_config(struct 
  						  VFRES_NEQ, VFRES_NETHCTRL,
  						  VFRES_NIQFLINT, VFRES_NIQ,
  						  VFRES_TC, VFRES_NVI,
 -						  FW_PFVF_CMD_CMASK_GET(
 -						  FW_PFVF_CMD_CMASK_MASK),
 +						  FW_PFVF_CMD_CMASK_MASK,
  						  pfvfres_pmask(
  						  adapter, pf, vf),
  						  VFRES_NEXACTF,
@@@ -5130,7 -5142,7 +5141,7 @@@ static int __init cxgb4_init_module(voi
  	/* Debugfs support is optional, just warn if this fails */
  	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
  	if (!cxgb4_debugfs_root)
 -		pr_warning("could not create debugfs entry, continuing\n");
 +		pr_warn("could not create debugfs entry, continuing\n");
  
  	ret = pci_register_driver(&cxgb4_driver);
  	if (ret < 0)
diff --combined drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3c5ffd2,6771b69..30724d8
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@@ -515,6 -515,10 +515,6 @@@ static void build_inline_wqe(struct mlx
  		wmb();
  		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
  	}
 -	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
 -	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
 -		(!!vlan_tx_tag_present(skb));
 -	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
  }
  
  u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
@@@ -588,21 -592,7 +588,21 @@@ netdev_tx_t mlx4_en_xmit(struct sk_buf
  		netif_tx_stop_queue(ring->tx_queue);
  		priv->port_stats.queue_stopped++;
  
 -		return NETDEV_TX_BUSY;
 +		/* If queue was emptied after the if, and before the
 +		 * stop_queue - need to wake the queue, or else it will remain
 +		 * stopped forever.
 +		 * Need a memory barrier to make sure ring->cons was not
 +		 * updated before queue was stopped.
 +		 */
 +		wmb();
 +
 +		if (unlikely(((int)(ring->prod - ring->cons)) <=
 +			     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 +			netif_tx_wake_queue(ring->tx_queue);
 +			priv->port_stats.wake_queue++;
 +		} else {
 +			return NETDEV_TX_BUSY;
 +		}
  	}
  
  	/* Track current inflight packets for performance analysis */
@@@ -640,10 -630,15 +640,15 @@@
  		ring->tx_csum++;
  	}
  
- 	/* Copy dst mac address to wqe */
- 	ethh = (struct ethhdr *)skb->data;
- 	tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- 	tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ 	if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ 		/* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ 		 * so that VFs and PF can communicate with each other
+ 		 */
+ 		ethh = (struct ethhdr *)skb->data;
+ 		tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ 		tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ 	}
+ 
  	/* Handle LSO (TSO) packets */
  	if (lso_header_size) {
  		/* Mark opcode as LSO */
diff --combined drivers/net/ethernet/mellanox/mlx4/main.c
index 983fd3d,a6542d7..f1ee52d
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@@ -1790,15 -1790,8 +1790,8 @@@ static void mlx4_enable_msi_x(struct ml
  	int i;
  
  	if (msi_x) {
- 		/* In multifunction mode each function gets 2 msi-X vectors
- 		 * one for data path completions anf the other for asynch events
- 		 * or command completions */
- 		if (mlx4_is_mfunc(dev)) {
- 			nreq = 2;
- 		} else {
- 			nreq = min_t(int, dev->caps.num_eqs -
- 				     dev->caps.reserved_eqs, nreq);
- 		}
+ 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ 			     nreq);
  
  		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
  		if (!entries)
@@@ -2169,8 -2162,7 +2162,8 @@@ slave_start
  			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
  		else {
  			dev->num_slaves = 0;
 -			if (mlx4_multi_func_init(dev)) {
 +			err = mlx4_multi_func_init(dev);
 +			if (err) {
  				mlx4_err(dev, "Failed to init slave mfunc"
  					 " interface, aborting.\n");
  				goto err_cmd;
@@@ -2194,8 -2186,7 +2187,8 @@@
  	/* In master functions, the communication channel must be initialized
  	 * after obtaining its address from fw */
  	if (mlx4_is_master(dev)) {
 -		if (mlx4_multi_func_init(dev)) {
 +		err = mlx4_multi_func_init(dev);
 +		if (err) {
  			mlx4_err(dev, "Failed to init master mfunc"
  				 "interface, aborting.\n");
  			goto err_close;
@@@ -2212,7 -2203,6 +2205,7 @@@
  	mlx4_enable_msi_x(dev);
  	if ((mlx4_is_mfunc(dev)) &&
  	    !(dev->flags & MLX4_FLAG_MSI_X)) {
 +		err = -ENOSYS;
  		mlx4_err(dev, "INTx is not supported in multi-function mode."
  			 " aborting.\n");
  		goto err_free_eq;
diff --combined drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8e40ea0,69e321a..7a849fc
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@@ -501,11 -501,12 +501,11 @@@ netxen_read_mac_addr(struct netxen_adap
  	for (i = 0; i < 6; i++)
  		netdev->dev_addr[i] = *(p + 5 - i);
  
 -	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
  	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
  
  	/* set station address */
  
 -	if (!is_valid_ether_addr(netdev->perm_addr))
 +	if (!is_valid_ether_addr(netdev->dev_addr))
  		dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
  
  	return 0;
@@@ -1962,10 -1963,12 +1962,12 @@@ unwind
  	while (--i >= 0) {
  		nf = &pbuf->frag_array[i+1];
  		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ 		nf->dma = 0ULL;
  	}
  
  	nf = &pbuf->frag_array[0];
  	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ 	nf->dma = 0ULL;
  
  out_err:
  	return -ENOMEM;
diff --combined drivers/net/ethernet/realtek/r8169.c
index 97fdbb1,1170232..4208f28
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@@ -83,7 -83,7 +83,7 @@@ static const int multicast_filter_limi
  #define R8169_REGS_SIZE		256
  #define R8169_NAPI_WEIGHT	64
  #define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
 -#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
 +#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
  #define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
  #define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
  
@@@ -727,6 -727,7 +727,6 @@@ struct rtl8169_private 
  	u16 mac_version;
  	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
  	u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
 -	u32 dirty_rx;
  	u32 dirty_tx;
  	struct rtl8169_stats rx_stats;
  	struct rtl8169_stats tx_stats;
@@@ -1825,8 -1826,6 +1825,6 @@@ static void rtl8169_rx_vlan_tag(struct 
  
  	if (opts2 & RxVlanTag)
  		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
- 
- 	desc->opts2 = 0;
  }
  
  static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@@ -4176,7 -4175,7 +4174,7 @@@ static void rtl_init_rxcfg(struct rtl81
  
  static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
  {
 -	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
 +	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
  }
  
  static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
@@@ -5919,7 -5918,7 +5917,7 @@@ static void rtl8169_pcierr_interrupt(st
  		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
  
  	/* The infamous DAC f*ckup only happens at boot time */
 -	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
 +	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
  		void __iomem *ioaddr = tp->mmio_addr;
  
  		netif_info(tp, intr, dev, "disabling PCI DAC\n");
@@@ -6034,8 -6033,10 +6032,8 @@@ static int rtl_rx(struct net_device *de
  	unsigned int count;
  
  	cur_rx = tp->cur_rx;
 -	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
 -	rx_left = min(rx_left, budget);
  
 -	for (; rx_left > 0; rx_left--, cur_rx++) {
 +	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
  		unsigned int entry = cur_rx % NUM_RX_DESC;
  		struct RxDesc *desc = tp->RxDescArray + entry;
  		u32 status;
@@@ -6061,8 -6062,6 +6059,6 @@@
  			    !(status & (RxRWT | RxFOVF)) &&
  			    (dev->features & NETIF_F_RXALL))
  				goto process_pkt;
- 
- 			rtl8169_mark_to_asic(desc, rx_buf_sz);
  		} else {
  			struct sk_buff *skb;
  			dma_addr_t addr;
@@@ -6083,16 -6082,14 +6079,14 @@@ process_pkt
  			if (unlikely(rtl8169_fragmented_frame(status))) {
  				dev->stats.rx_dropped++;
  				dev->stats.rx_length_errors++;
- 				rtl8169_mark_to_asic(desc, rx_buf_sz);
- 				continue;
+ 				goto release_descriptor;
  			}
  
  			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
  						  tp, pkt_size, addr);
- 			rtl8169_mark_to_asic(desc, rx_buf_sz);
  			if (!skb) {
  				dev->stats.rx_dropped++;
- 				continue;
+ 				goto release_descriptor;
  			}
  
  			rtl8169_rx_csum(skb, status);
@@@ -6108,18 -6105,17 +6102,15 @@@
  			tp->rx_stats.bytes += pkt_size;
  			u64_stats_update_end(&tp->rx_stats.syncp);
  		}
- 
- 		/* Work around for AMD plateform. */
- 		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- 		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- 			desc->opts2 = 0;
- 			cur_rx++;
- 		}
+ release_descriptor:
+ 		desc->opts2 = 0;
+ 		wmb();
+ 		rtl8169_mark_to_asic(desc, rx_buf_sz);
  	}
  
  	count = cur_rx - tp->cur_rx;
  	tp->cur_rx = cur_rx;
  
 -	tp->dirty_rx += count;
 -
  	return count;
  }
  
@@@ -6943,6 -6939,7 +6934,6 @@@ rtl_init_one(struct pci_dev *pdev, cons
  	/* Get MAC address */
  	for (i = 0; i < ETH_ALEN; i++)
  		dev->dev_addr[i] = RTL_R8(MAC0 + i);
 -	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
  
  	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
  	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
diff --combined drivers/net/hyperv/netvsc_drv.c
index a9975c7,8264f0e..d5202a4
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@@ -304,9 -304,9 +304,9 @@@ int netvsc_recv_callback(struct hv_devi
  static void netvsc_get_drvinfo(struct net_device *net,
  			       struct ethtool_drvinfo *info)
  {
 -	strcpy(info->driver, KBUILD_MODNAME);
 -	strcpy(info->version, HV_DRV_VERSION);
 -	strcpy(info->fw_version, "N/A");
 +	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 +	strlcpy(info->version, HV_DRV_VERSION, sizeof(info->version));
 +	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
  }
  
  static int netvsc_change_mtu(struct net_device *ndev, int mtu)
@@@ -349,7 -349,7 +349,7 @@@ static int netvsc_set_mac_addr(struct n
  	struct net_device_context *ndevctx = netdev_priv(ndev);
  	struct hv_device *hdev =  ndevctx->device_ctx;
  	struct sockaddr *addr = p;
- 	char save_adr[14];
+ 	char save_adr[ETH_ALEN];
  	unsigned char save_aatype;
  	int err;
  
diff --combined drivers/net/macvlan.c
index 1047e58,d3fb97d..7b44ebd
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@@ -375,6 -375,7 +375,6 @@@ static int macvlan_set_mac_address(stru
  
  	if (!(dev->flags & IFF_UP)) {
  		/* Just copy in the new address */
 -		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
  		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
  	} else {
  		/* Rehash and update the device filters */
@@@ -585,8 -586,8 +585,8 @@@ static int macvlan_fdb_del(struct ndms
  static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
  					struct ethtool_drvinfo *drvinfo)
  {
 -	snprintf(drvinfo->driver, 32, "macvlan");
 -	snprintf(drvinfo->version, 32, "0.1");
 +	strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
 +	strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
  }
  
  static int macvlan_ethtool_get_settings(struct net_device *dev,
@@@ -764,22 -765,16 +764,22 @@@ int macvlan_common_newlink(struct net *
  		memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
  	}
  
 +	err = netdev_upper_dev_link(lowerdev, dev);
 +	if (err)
 +		goto destroy_port;
 +
  	port->count += 1;
  	err = register_netdevice(dev);
  	if (err < 0)
 -		goto destroy_port;
 +		goto upper_dev_unlink;
  
  	list_add_tail(&vlan->list, &port->vlans);
  	netif_stacked_transfer_operstate(lowerdev, dev);
  
  	return 0;
  
 +upper_dev_unlink:
 +	netdev_upper_dev_unlink(lowerdev, dev);
  destroy_port:
  	port->count -= 1;
  	if (!port->count)
@@@ -803,7 -798,6 +803,7 @@@ void macvlan_dellink(struct net_device 
  
  	list_del(&vlan->list);
  	unregister_netdevice_queue(dev, head);
 +	netdev_upper_dev_unlink(vlan->lowerdev, dev);
  }
  EXPORT_SYMBOL_GPL(macvlan_dellink);
  
@@@ -828,7 -822,10 +828,10 @@@ static int macvlan_changelink(struct ne
  
  static size_t macvlan_get_size(const struct net_device *dev)
  {
- 	return nla_total_size(4);
+ 	return (0
+ 		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ 		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ 		);
  }
  
  static int macvlan_fill_info(struct sk_buff *skb,
diff --combined drivers/net/tun.c
index 293ce8d,cc09b67..8d208dd
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -109,11 -109,11 +109,11 @@@ struct tap_filter 
  	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
  };
  
- /* 1024 is probably a high enough limit: modern hypervisors seem to support on
-  * the order of 100-200 CPUs so this leaves us some breathing space if we want
-  * to match a queue per guest CPU.
-  */
- #define MAX_TAP_QUEUES 1024
+ /* DEFAULT_MAX_NUM_RSS_QUEUES was chosen to let the rx/tx queues allocated for
+  * the netdevice fit in one page, so that the memory allocation is sure to
+  * succeed. TODO: increase the limit. */
+ #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+ #define MAX_TAP_FLOWS  4096
  
  #define TUN_FLOW_EXPIRE (3 * HZ)
  
@@@ -186,6 -186,7 +186,7 @@@ struct tun_struct 
  	unsigned int numdisabled;
  	struct list_head disabled;
  	void *security;
+ 	u32 flow_count;
  };
  
  static inline u32 tun_hashfn(u32 rxhash)
@@@ -219,6 -220,7 +220,7 @@@ static struct tun_flow_entry *tun_flow_
  		e->queue_index = queue_index;
  		e->tun = tun;
  		hlist_add_head_rcu(&e->hash_link, head);
+ 		++tun->flow_count;
  	}
  	return e;
  }
@@@ -229,6 -231,7 +231,7 @@@ static void tun_flow_delete(struct tun_
  		  e->rxhash, e->queue_index);
  	hlist_del_rcu(&e->hash_link);
  	kfree_rcu(e, rcu);
+ 	--tun->flow_count;
  }
  
  static void tun_flow_flush(struct tun_struct *tun)
@@@ -318,7 -321,8 +321,8 @@@ static void tun_flow_update(struct tun_
  		e->updated = jiffies;
  	} else {
  		spin_lock_bh(&tun->lock);
- 		if (!tun_flow_find(head, rxhash))
+ 		if (!tun_flow_find(head, rxhash) &&
+ 		    tun->flow_count < MAX_TAP_FLOWS)
  			tun_flow_create(tun, head, rxhash, queue_index);
  
  		if (!timer_pending(&tun->flow_gc_timer))
@@@ -1005,7 -1009,6 +1009,7 @@@ static int zerocopy_sg_from_iovec(struc
  		skb->data_len += len;
  		skb->len += len;
  		skb->truesize += truesize;
 +		skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
  		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
  		while (len) {
  			int off = base & ~PAGE_MASK;
@@@ -1151,18 -1154,16 +1155,18 @@@ static ssize_t tun_get_user(struct tun_
  	}
  
  	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 +		unsigned short gso_type = 0;
 +
  		pr_debug("GSO!\n");
  		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
  		case VIRTIO_NET_HDR_GSO_TCPV4:
 -			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 +			gso_type = SKB_GSO_TCPV4;
  			break;
  		case VIRTIO_NET_HDR_GSO_TCPV6:
 -			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 +			gso_type = SKB_GSO_TCPV6;
  			break;
  		case VIRTIO_NET_HDR_GSO_UDP:
 -			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 +			gso_type = SKB_GSO_UDP;
  			break;
  		default:
  			tun->dev->stats.rx_frame_errors++;
@@@ -1171,10 -1172,9 +1175,10 @@@
  		}
  
  		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 -			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 +			gso_type |= SKB_GSO_TCP_ECN;
  
  		skb_shinfo(skb)->gso_size = gso.gso_size;
 +		skb_shinfo(skb)->gso_type |= gso_type;
  		if (skb_shinfo(skb)->gso_size == 0) {
  			tun->dev->stats.rx_frame_errors++;
  			kfree_skb(skb);
@@@ -1587,6 -1587,8 +1591,8 @@@ static int tun_set_iff(struct net *net
  	else {
  		char *name;
  		unsigned long flags = 0;
+ 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+ 			     MAX_TAP_QUEUES : 1;
  
  		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
  			return -EPERM;
@@@ -1610,8 -1612,8 +1616,8 @@@
  			name = ifr->ifr_name;
  
  		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
- 				       tun_setup,
- 				       MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+ 				       tun_setup, queues, queues);
+ 
  		if (!dev)
  			return -ENOMEM;
  
diff --combined drivers/net/usb/cdc_ncm.c
index 016aa6f,9197b2c..b5ad7ea
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@@ -65,9 -65,9 +65,9 @@@ cdc_ncm_get_drvinfo(struct net_device *
  {
  	struct usbnet *dev = netdev_priv(net);
  
 -	strncpy(info->driver, dev->driver_name, sizeof(info->driver));
 -	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
 -	strncpy(info->fw_version, dev->driver_info->description,
 +	strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
 +	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
 +	strlcpy(info->fw_version, dev->driver_info->description,
  		sizeof(info->fw_version));
  	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
  }
@@@ -435,6 -435,13 +435,13 @@@ advance
  		len -= temp;
  	}
  
+ 	/* some buggy devices have an IAD but no CDC Union */
+ 	if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+ 		ctx->control = intf;
+ 		ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
+ 		dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
+ 	}
+ 
  	/* check if we got everything */
  	if ((ctx->control == NULL) || (ctx->data == NULL) ||
  	    ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
@@@ -497,7 -504,8 +504,8 @@@
  error2:
  	usb_set_intfdata(ctx->control, NULL);
  	usb_set_intfdata(ctx->data, NULL);
- 	usb_driver_release_interface(driver, ctx->data);
+ 	if (ctx->data != ctx->control)
+ 		usb_driver_release_interface(driver, ctx->data);
  error:
  	cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
  	dev->data[0] = 0;
@@@ -1155,6 -1163,20 +1163,20 @@@ static const struct driver_info wwan_in
  	.tx_fixup = cdc_ncm_tx_fixup,
  };
  
+ /* Same as wwan_info, but with FLAG_NOARP  */
+ static const struct driver_info wwan_noarp_info = {
+ 	.description = "Mobile Broadband Network Device (NO ARP)",
+ 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ 			| FLAG_WWAN | FLAG_NOARP,
+ 	.bind = cdc_ncm_bind,
+ 	.unbind = cdc_ncm_unbind,
+ 	.check_connect = cdc_ncm_check_connect,
+ 	.manage_power = usbnet_manage_power,
+ 	.status = cdc_ncm_status,
+ 	.rx_fixup = cdc_ncm_rx_fixup,
+ 	.tx_fixup = cdc_ncm_tx_fixup,
+ };
+ 
  static const struct usb_device_id cdc_devs[] = {
  	/* Ericsson MBM devices like F5521gw */
  	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@@ -1194,6 -1216,13 +1216,13 @@@
  	  .driver_info = (unsigned long)&wwan_info,
  	},
  
+ 	/* Infineon(now Intel) HSPA Modem platform */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+ 		USB_CLASS_COMM,
+ 		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&wwan_noarp_info,
+ 	},
+ 
  	/* Generic CDC-NCM devices */
  	{ USB_INTERFACE_INFO(USB_CLASS_COMM,
  		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --combined drivers/net/usb/dm9601.c
index 0794004,d7e9944..174e5ec
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@@ -45,6 -45,12 +45,12 @@@
  #define DM_MCAST_ADDR	0x16	/* 8 bytes */
  #define DM_GPR_CTRL	0x1e
  #define DM_GPR_DATA	0x1f
+ #define DM_CHIP_ID	0x2c
+ #define DM_MODE_CTRL	0x91	/* only on dm9620 */
+ 
+ /* chip id values */
+ #define ID_DM9601	0
+ #define ID_DM9620	1
  
  #define DM_MAX_MCAST	64
  #define DM_MCAST_SIZE	8
@@@ -53,7 -59,6 +59,6 @@@
  #define DM_RX_OVERHEAD	7	/* 3 byte header + 4 byte crc tail */
  #define DM_TIMEOUT	1000
  
- 
  static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
  {
  	int err;
@@@ -84,32 -89,23 +89,23 @@@ static int dm_write(struct usbnet *dev
  
  static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
  {
- 	return usbnet_write_cmd(dev, DM_WRITE_REGS,
+ 	return usbnet_write_cmd(dev, DM_WRITE_REG,
  				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
  				value, reg, NULL, 0);
  }
  
- static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
- 				  u16 length, void *data)
+ static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
  {
  	usbnet_write_cmd_async(dev, DM_WRITE_REGS,
  			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 			       value, reg, data, length);
- }
- 
- static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
- {
- 	netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
- 
- 	dm_write_async_helper(dev, reg, 0, length, data);
+ 			       0, reg, data, length);
  }
  
  static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
  {
- 	netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
- 		   reg, value);
- 
- 	dm_write_async_helper(dev, reg, value, 0, NULL);
+ 	usbnet_write_cmd_async(dev, DM_WRITE_REG,
+ 			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 			       value, reg, NULL, 0);
  }
  
  static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
@@@ -122,7 -118,7 +118,7 @@@
  	dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4);
  
  	for (i = 0; i < DM_TIMEOUT; i++) {
 -		u8 tmp;
 +		u8 tmp = 0;
  
  		udelay(1);
  		ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
@@@ -165,7 -161,7 +161,7 @@@ static int dm_write_shared_word(struct 
  	dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
  
  	for (i = 0; i < DM_TIMEOUT; i++) {
 -		u8 tmp;
 +		u8 tmp = 0;
  
  		udelay(1);
  		ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
@@@ -358,7 -354,7 +354,7 @@@ static const struct net_device_ops dm96
  static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
  {
  	int ret;
- 	u8 mac[ETH_ALEN];
+ 	u8 mac[ETH_ALEN], id;
  
  	ret = usbnet_get_endpoints(dev, intf);
  	if (ret)
@@@ -399,6 -395,24 +395,24 @@@
  		__dm9601_set_mac_address(dev);
  	}
  
+ 	if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
+ 		netdev_err(dev->net, "Error reading chip ID\n");
+ 		ret = -ENODEV;
+ 		goto out;
+ 	}
+ 
+ 	/* put dm9620 devices in dm9601 mode */
+ 	if (id == ID_DM9620) {
+ 		u8 mode;
+ 
+ 		if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
+ 			netdev_err(dev->net, "Error reading MODE_CTRL\n");
+ 			ret = -ENODEV;
+ 			goto out;
+ 		}
+ 		dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
+ 	}
+ 
  	/* power up phy */
  	dm_write_reg(dev, DM_GPR_CTRL, 1);
  	dm_write_reg(dev, DM_GPR_DATA, 0);
@@@ -581,6 -595,10 +595,10 @@@ static const struct usb_device_id produ
  	 USB_DEVICE(0x0a46, 0x9000),	/* DM9000E */
  	 .driver_info = (unsigned long)&dm9601_info,
  	 },
+ 	{
+ 	 USB_DEVICE(0x0a46, 0x9620),	/* DM9620 USB to Fast Ethernet Adapter */
+ 	 .driver_info = (unsigned long)&dm9601_info,
+ 	 },
  	{},			// END
  };
  
diff --combined drivers/net/virtio_net.c
index 58914c8,35c00c5..eda2042
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/scatterlist.h>
  #include <linux/if_vlan.h>
  #include <linux/slab.h>
+ #include <linux/cpu.h>
  
  static int napi_weight = 128;
  module_param(napi_weight, int, 0444);
@@@ -123,6 -124,12 +124,12 @@@ struct virtnet_info 
  
  	/* Does the affinity hint is set for virtqueues? */
  	bool affinity_hint_set;
+ 
+ 	/* Per-cpu variable to show the mapping from CPU to virtqueue */
+ 	int __percpu *vq_index;
+ 
+ 	/* CPU hot plug notifier */
+ 	struct notifier_block nb;
  };
  
  struct skb_vnet_hdr {
@@@ -220,7 -227,6 +227,7 @@@ static void set_skb_frag(struct sk_buf
  	skb->len += size;
  	skb->truesize += PAGE_SIZE;
  	skb_shinfo(skb)->nr_frags++;
 +	skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
  	*len -= size;
  }
  
@@@ -380,18 -386,16 +387,18 @@@ static void receive_buf(struct receive_
  		 ntohs(skb->protocol), skb->len, skb->pkt_type);
  
  	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 +		unsigned short gso_type = 0;
 +
  		pr_debug("GSO!\n");
  		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
  		case VIRTIO_NET_HDR_GSO_TCPV4:
 -			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 +			gso_type = SKB_GSO_TCPV4;
  			break;
  		case VIRTIO_NET_HDR_GSO_UDP:
 -			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 +			gso_type = SKB_GSO_UDP;
  			break;
  		case VIRTIO_NET_HDR_GSO_TCPV6:
 -			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 +			gso_type = SKB_GSO_TCPV6;
  			break;
  		default:
  			net_warn_ratelimited("%s: bad gso type %u.\n",
@@@ -400,7 -404,7 +407,7 @@@
  		}
  
  		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 -			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 +			gso_type |= SKB_GSO_TCP_ECN;
  
  		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
  		if (skb_shinfo(skb)->gso_size == 0) {
@@@ -408,7 -412,6 +415,7 @@@
  			goto frame_err;
  		}
  
 +		skb_shinfo(skb)->gso_type |= gso_type;
  		/* Header must be checked, and gso_segs computed. */
  		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
  		skb_shinfo(skb)->gso_segs = 0;
@@@ -757,77 -760,19 +764,77 @@@ static netdev_tx_t start_xmit(struct sk
  	return NETDEV_TX_OK;
  }
  
 +/*
 + * Send command via the control virtqueue and check status.  Commands
 + * supported by the hypervisor, as indicated by feature bits, should
 + * never fail unless improperly formated.
 + */
 +static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 +				 struct scatterlist *data, int out, int in)
 +{
 +	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
 +	struct virtio_net_ctrl_hdr ctrl;
 +	virtio_net_ctrl_ack status = ~0;
 +	unsigned int tmp;
 +	int i;
 +
 +	/* Caller should know better */
 +	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
 +		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));
 +
 +	out++; /* Add header */
 +	in++; /* Add return status */
 +
 +	ctrl.class = class;
 +	ctrl.cmd = cmd;
 +
 +	sg_init_table(sg, out + in);
 +
 +	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
 +	for_each_sg(data, s, out + in - 2, i)
 +		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
 +	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 +
 +	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
 +
 +	virtqueue_kick(vi->cvq);
 +
 +	/* Spin for a response, the kick causes an ioport write, trapping
 +	 * into the hypervisor, so the request should be handled immediately.
 +	 */
 +	while (!virtqueue_get_buf(vi->cvq, &tmp))
 +		cpu_relax();
 +
 +	return status == VIRTIO_NET_OK;
 +}
 +
  static int virtnet_set_mac_address(struct net_device *dev, void *p)
  {
  	struct virtnet_info *vi = netdev_priv(dev);
  	struct virtio_device *vdev = vi->vdev;
  	int ret;
 +	struct sockaddr *addr = p;
 +	struct scatterlist sg;
  
 -	ret = eth_mac_addr(dev, p);
 +	ret = eth_prepare_mac_addr_change(dev, p);
  	if (ret)
  		return ret;
  
 -	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
 +	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
 +		sg_init_one(&sg, addr->sa_data, dev->addr_len);
 +		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 +					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
 +					  &sg, 1, 0)) {
 +			dev_warn(&vdev->dev,
 +				 "Failed to set mac address by vq command.\n");
 +			return -EINVAL;
 +		}
 +	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
  		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
 -		                  dev->dev_addr, dev->addr_len);
 +				  addr->sa_data, dev->addr_len);
 +	}
 +
 +	eth_commit_mac_addr_change(dev, p);
  
  	return 0;
  }
@@@ -881,6 -826,51 +888,6 @@@ static void virtnet_netpoll(struct net_
  }
  #endif
  
 -/*
 - * Send command via the control virtqueue and check status.  Commands
 - * supported by the hypervisor, as indicated by feature bits, should
 - * never fail unless improperly formated.
 - */
 -static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 -				 struct scatterlist *data, int out, int in)
 -{
 -	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
 -	struct virtio_net_ctrl_hdr ctrl;
 -	virtio_net_ctrl_ack status = ~0;
 -	unsigned int tmp;
 -	int i;
 -
 -	/* Caller should know better */
 -	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
 -		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));
 -
 -	out++; /* Add header */
 -	in++; /* Add return status */
 -
 -	ctrl.class = class;
 -	ctrl.cmd = cmd;
 -
 -	sg_init_table(sg, out + in);
 -
 -	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
 -	for_each_sg(data, s, out + in - 2, i)
 -		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
 -	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 -
 -	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
 -
 -	virtqueue_kick(vi->cvq);
 -
 -	/*
 -	 * Spin for a response, the kick causes an ioport write, trapping
 -	 * into the hypervisor, so the request should be handled immediately.
 -	 */
 -	while (!virtqueue_get_buf(vi->cvq, &tmp))
 -		cpu_relax();
 -
 -	return status == VIRTIO_NET_OK;
 -}
 -
  static void virtnet_ack_link_announce(struct virtnet_info *vi)
  {
  	rtnl_lock();
@@@ -1030,32 -1020,75 +1037,75 @@@ static int virtnet_vlan_rx_kill_vid(str
  	return 0;
  }
  
- static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
  {
  	int i;
+ 	int cpu;
+ 
+ 	if (vi->affinity_hint_set) {
+ 		for (i = 0; i < vi->max_queue_pairs; i++) {
+ 			virtqueue_set_affinity(vi->rq[i].vq, -1);
+ 			virtqueue_set_affinity(vi->sq[i].vq, -1);
+ 		}
+ 
+ 		vi->affinity_hint_set = false;
+ 	}
+ 
+ 	i = 0;
+ 	for_each_online_cpu(cpu) {
+ 		if (cpu == hcpu) {
+ 			*per_cpu_ptr(vi->vq_index, cpu) = -1;
+ 		} else {
+ 			*per_cpu_ptr(vi->vq_index, cpu) =
+ 				++i % vi->curr_queue_pairs;
+ 		}
+ 	}
+ }
+ 
+ static void virtnet_set_affinity(struct virtnet_info *vi)
+ {
+ 	int i;
+ 	int cpu;
  
  	/* In multiqueue mode, when the number of cpu is equal to the number of
  	 * queue pairs, we let the queue pairs to be private to one cpu by
  	 * setting the affinity hint to eliminate the contention.
  	 */
- 	if ((vi->curr_queue_pairs == 1 ||
- 	     vi->max_queue_pairs != num_online_cpus()) && set) {
- 		if (vi->affinity_hint_set)
- 			set = false;
- 		else
- 			return;
+ 	if (vi->curr_queue_pairs == 1 ||
+ 	    vi->max_queue_pairs != num_online_cpus()) {
+ 		virtnet_clean_affinity(vi, -1);
+ 		return;
  	}
  
- 	for (i = 0; i < vi->max_queue_pairs; i++) {
- 		int cpu = set ? i : -1;
+ 	i = 0;
+ 	for_each_online_cpu(cpu) {
  		virtqueue_set_affinity(vi->rq[i].vq, cpu);
  		virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ 		*per_cpu_ptr(vi->vq_index, cpu) = i;
+ 		i++;
  	}
  
- 	if (set)
- 		vi->affinity_hint_set = true;
- 	else
- 		vi->affinity_hint_set = false;
+ 	vi->affinity_hint_set = true;
+ }
+ 
+ static int virtnet_cpu_callback(struct notifier_block *nfb,
+ 			        unsigned long action, void *hcpu)
+ {
+ 	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+ 
+ 	switch(action & ~CPU_TASKS_FROZEN) {
+ 	case CPU_ONLINE:
+ 	case CPU_DOWN_FAILED:
+ 	case CPU_DEAD:
+ 		virtnet_set_affinity(vi);
+ 		break;
+ 	case CPU_DOWN_PREPARE:
+ 		virtnet_clean_affinity(vi, (long)hcpu);
+ 		break;
+ 	default:
+ 		break;
+ 	}
+ 	return NOTIFY_OK;
  }
  
  static void virtnet_get_ringparam(struct net_device *dev,
@@@ -1099,13 -1132,15 +1149,15 @@@ static int virtnet_set_channels(struct 
  	if (queue_pairs > vi->max_queue_pairs)
  		return -EINVAL;
  
+ 	get_online_cpus();
  	err = virtnet_set_queues(vi, queue_pairs);
  	if (!err) {
  		netif_set_real_num_tx_queues(dev, queue_pairs);
  		netif_set_real_num_rx_queues(dev, queue_pairs);
  
- 		virtnet_set_affinity(vi, true);
+ 		virtnet_set_affinity(vi);
  	}
+ 	put_online_cpus();
  
  	return err;
  }
@@@ -1144,12 -1179,19 +1196,19 @@@ static int virtnet_change_mtu(struct ne
  
  /* To avoid contending a lock hold by a vcpu who would exit to host, select the
   * txq based on the processor id.
   */
  static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
  {
- 	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- 		  smp_processor_id();
+ 	int txq;
+ 	struct virtnet_info *vi = netdev_priv(dev);
+ 
+ 	if (skb_rx_queue_recorded(skb)) {
+ 		txq = skb_get_rx_queue(skb);
+ 	} else {
+ 		txq = *__this_cpu_ptr(vi->vq_index);
+ 		if (txq == -1)
+ 			txq = 0;
+ 	}
  
  	while (unlikely(txq >= dev->real_num_tx_queues))
  		txq -= dev->real_num_tx_queues;
@@@ -1265,7 -1307,7 +1324,7 @@@ static void virtnet_del_vqs(struct virt
  {
  	struct virtio_device *vdev = vi->vdev;
  
- 	virtnet_set_affinity(vi, false);
+ 	virtnet_clean_affinity(vi, -1);
  
  	vdev->config->del_vqs(vdev);
  
@@@ -1388,7 -1430,10 +1447,10 @@@ static int init_vqs(struct virtnet_inf
  	if (ret)
  		goto err_free;
  
- 	virtnet_set_affinity(vi, true);
+ 	get_online_cpus();
+ 	virtnet_set_affinity(vi);
+ 	put_online_cpus();
+ 
  	return 0;
  
  err_free:
@@@ -1470,6 -1515,10 +1532,10 @@@ static int virtnet_probe(struct virtio_
  	if (vi->stats == NULL)
  		goto free;
  
+ 	vi->vq_index = alloc_percpu(int);
+ 	if (vi->vq_index == NULL)
+ 		goto free_stats;
+ 
  	mutex_init(&vi->config_lock);
  	vi->config_enable = true;
  	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@@ -1493,7 -1542,7 +1559,7 @@@
  	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
  	err = init_vqs(vi);
  	if (err)
- 		goto free_stats;
+ 		goto free_index;
  
  	netif_set_real_num_tx_queues(dev, 1);
  	netif_set_real_num_rx_queues(dev, 1);
@@@ -1516,6 -1565,13 +1582,13 @@@
  		}
  	}
  
+ 	vi->nb.notifier_call = &virtnet_cpu_callback;
+ 	err = register_hotcpu_notifier(&vi->nb);
+ 	if (err) {
+ 		pr_debug("virtio_net: registering cpu notifier failed\n");
+ 		goto free_recv_bufs;
+ 	}
+ 
  	/* Assume link up if device can't report link status,
  	   otherwise get link status from config. */
  	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@@ -1537,6 -1593,8 +1610,8 @@@ free_recv_bufs
  free_vqs:
  	cancel_delayed_work_sync(&vi->refill);
  	virtnet_del_vqs(vi);
+ free_index:
+ 	free_percpu(vi->vq_index);
  free_stats:
  	free_percpu(vi->stats);
  free:
@@@ -1560,6 -1618,8 +1635,8 @@@ static void virtnet_remove(struct virti
  {
  	struct virtnet_info *vi = vdev->priv;
  
+ 	unregister_hotcpu_notifier(&vi->nb);
+ 
  	/* Prevent config work handler from accessing the device. */
  	mutex_lock(&vi->config_lock);
  	vi->config_enable = false;
@@@ -1571,6 -1631,7 +1648,7 @@@
  
  	flush_work(&vi->config_work);
  
+ 	free_percpu(vi->vq_index);
  	free_percpu(vi->stats);
  	free_netdev(vi->dev);
  }
@@@ -1645,7 -1706,6 +1723,7 @@@ static unsigned int features[] = 
  	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
  	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
  	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
 +	VIRTIO_NET_F_CTRL_MAC_ADDR,
  };
  
  static struct virtio_driver virtio_net_driver = {
diff --combined net/batman-adv/distributed-arp-table.c
index 7485a78,183f97a..ea0bd31
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@@ -1,4 -1,4 +1,4 @@@
 -/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors:
   *
   * Antonio Quartulli
   *
@@@ -738,6 -738,7 +738,7 @@@ static uint16_t batadv_arp_get_type(str
  	struct arphdr *arphdr;
  	struct ethhdr *ethhdr;
  	__be32 ip_src, ip_dst;
+ 	uint8_t *hw_src, *hw_dst;
  	uint16_t type = 0;
  
  	/* pull the ethernet header */
@@@ -777,9 -778,23 +778,23 @@@
  	ip_src = batadv_arp_ip_src(skb, hdr_size);
  	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
  	if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
- 	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+ 	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+ 	    ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+ 	    ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
  		goto out;
  
+ 	hw_src = batadv_arp_hw_src(skb, hdr_size);
+ 	if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+ 		goto out;
+ 
+ 	/* we don't care about the destination MAC address in ARP requests */
+ 	if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+ 		hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ 		if (is_zero_ether_addr(hw_dst) ||
+ 		    is_multicast_ether_addr(hw_dst))
+ 			goto out;
+ 	}
+ 
  	type = ntohs(arphdr->ar_op);
  out:
  	return type;
@@@ -1012,6 -1027,8 +1027,8 @@@ bool batadv_dat_snoop_incoming_arp_repl
  	 */
  	ret = !batadv_is_my_client(bat_priv, hw_dst);
  out:
+ 	if (ret)
+ 		kfree_skb(skb);
  	/* if ret == false -> packet has to be delivered to the interface */
  	return ret;
  }
diff --combined net/ipv4/ip_gre.c
index 801e023,e81b1ca..00a14b9
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@@ -738,7 -738,7 +738,7 @@@ drop
  static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
  {
  	struct ip_tunnel *tunnel = netdev_priv(dev);
 -	const struct iphdr  *old_iph = ip_hdr(skb);
 +	const struct iphdr  *old_iph;
  	const struct iphdr  *tiph;
  	struct flowi4 fl4;
  	u8     tos;
@@@ -756,8 -756,6 +756,8 @@@
  	    skb_checksum_help(skb))
  		goto tx_error;
  
 +	old_iph = ip_hdr(skb);
 +
  	if (dev->type == ARPHRD_ETHER)
  		IPCB(skb)->flags = 0;
  
@@@ -820,8 -818,8 +820,8 @@@
  
  	ttl = tiph->ttl;
  	tos = tiph->tos;
 -	if (tos == 1) {
 -		tos = 0;
 +	if (tos & 0x1) {
 +		tos &= ~0x1;
  		if (skb->protocol == htons(ETH_P_IP))
  			tos = old_iph->tos;
  		else if (skb->protocol == htons(ETH_P_IPV6))
@@@ -965,8 -963,12 +965,12 @@@
  			ptr--;
  		}
  		if (tunnel->parms.o_flags&GRE_CSUM) {
+ 			int offset = skb_transport_offset(skb);
+ 
  			*ptr = 0;
- 			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
+ 			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
+ 								 skb->len - offset,
+ 								 0));
  		}
  	}
  
diff --combined net/ipv4/tcp_ipv4.c
index bbbdcc5,70b09ef..5a1cfc6
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -369,11 -369,10 +369,10 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  	 * We do take care of PMTU discovery (RFC1191) special case :
  	 * we can receive locally generated ICMP messages while socket is held.
  	 */
- 	if (sock_owned_by_user(sk) &&
- 	    type != ICMP_DEST_UNREACH &&
- 	    code != ICMP_FRAG_NEEDED)
- 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
- 
+ 	if (sock_owned_by_user(sk)) {
+ 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+ 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+ 	}
  	if (sk->sk_state == TCP_CLOSE)
  		goto out;
  
@@@ -657,8 -656,7 +656,8 @@@ static void tcp_v4_send_reset(struct so
  		 * no RST generated if md5 hash doesn't match.
  		 */
  		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
 -					     &tcp_hashinfo, ip_hdr(skb)->daddr,
 +					     &tcp_hashinfo, ip_hdr(skb)->saddr,
 +					     th->source, ip_hdr(skb)->daddr,
  					     ntohs(th->source), inet_iif(skb));
  		/* don't send rst if it can't find key */
  		if (!sk1)
@@@ -1569,7 -1567,7 +1568,7 @@@ int tcp_v4_conn_request(struct sock *sk
  		goto drop_and_free;
  
  	if (!want_cookie || tmp_opt.tstamp_ok)
 -		TCP_ECN_create_request(req, skb);
 +		TCP_ECN_create_request(req, skb, sock_net(sk));
  
  	if (want_cookie) {
  		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@@ -2075,7 -2073,6 +2074,7 @@@ do_time_wait
  	case TCP_TW_SYN: {
  		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
  							&tcp_hashinfo,
 +							iph->saddr, th->source,
  							iph->daddr, th->dest,
  							inet_iif(skb));
  		if (sk2) {
@@@ -2890,7 -2887,6 +2889,7 @@@ EXPORT_SYMBOL(tcp_prot)
  
  static int __net_init tcp_sk_init(struct net *net)
  {
 +	net->ipv4.sysctl_tcp_ecn = 2;
  	return 0;
  }
  
diff --combined net/ipv4/udp.c
index e0610e4,1f4d405..6791aac
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@@ -139,7 -139,6 +139,7 @@@ static int udp_lib_lport_inuse(struct n
  {
  	struct sock *sk2;
  	struct hlist_nulls_node *node;
 +	kuid_t uid = sock_i_uid(sk);
  
  	sk_nulls_for_each(sk2, node, &hslot->head)
  		if (net_eq(sock_net(sk2), net) &&
@@@ -148,8 -147,6 +148,8 @@@
  		    (!sk2->sk_reuse || !sk->sk_reuse) &&
  		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
  		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 +		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
 +		      !uid_eq(uid, sock_i_uid(sk2))) &&
  		    (*saddr_comp)(sk, sk2)) {
  			if (bitmap)
  				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
@@@ -172,7 -169,6 +172,7 @@@ static int udp_lib_lport_inuse2(struct 
  {
  	struct sock *sk2;
  	struct hlist_nulls_node *node;
 +	kuid_t uid = sock_i_uid(sk);
  	int res = 0;
  
  	spin_lock(&hslot2->lock);
@@@ -183,8 -179,6 +183,8 @@@
  		    (!sk2->sk_reuse || !sk->sk_reuse) &&
  		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
  		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 +		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
 +		      !uid_eq(uid, sock_i_uid(sk2))) &&
  		    (*saddr_comp)(sk, sk2)) {
  			res = 1;
  			break;
@@@ -343,26 -337,26 +343,26 @@@ static inline int compute_score(struct 
  			!ipv6_only_sock(sk)) {
  		struct inet_sock *inet = inet_sk(sk);
  
 -		score = (sk->sk_family == PF_INET ? 1 : 0);
 +		score = (sk->sk_family == PF_INET ? 2 : 1);
  		if (inet->inet_rcv_saddr) {
  			if (inet->inet_rcv_saddr != daddr)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  		if (inet->inet_daddr) {
  			if (inet->inet_daddr != saddr)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  		if (inet->inet_dport) {
  			if (inet->inet_dport != sport)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  		if (sk->sk_bound_dev_if) {
  			if (sk->sk_bound_dev_if != dif)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  	}
  	return score;
@@@ -371,6 -365,7 +371,6 @@@
  /*
   * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
   */
 -#define SCORE2_MAX (1 + 2 + 2 + 2)
  static inline int compute_score2(struct sock *sk, struct net *net,
  				 __be32 saddr, __be16 sport,
  				 __be32 daddr, unsigned int hnum, int dif)
@@@ -385,21 -380,21 +385,21 @@@
  		if (inet->inet_num != hnum)
  			return -1;
  
 -		score = (sk->sk_family == PF_INET ? 1 : 0);
 +		score = (sk->sk_family == PF_INET ? 2 : 1);
  		if (inet->inet_daddr) {
  			if (inet->inet_daddr != saddr)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  		if (inet->inet_dport) {
  			if (inet->inet_dport != sport)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  		if (sk->sk_bound_dev_if) {
  			if (sk->sk_bound_dev_if != dif)
  				return -1;
 -			score += 2;
 +			score += 4;
  		}
  	}
  	return score;
@@@ -414,29 -409,19 +414,29 @@@ static struct sock *udp4_lib_lookup2(st
  {
  	struct sock *sk, *result;
  	struct hlist_nulls_node *node;
 -	int score, badness;
 +	int score, badness, matches = 0, reuseport = 0;
 +	u32 hash = 0;
  
  begin:
  	result = NULL;
 -	badness = -1;
 +	badness = 0;
  	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
  		score = compute_score2(sk, net, saddr, sport,
  				      daddr, hnum, dif);
  		if (score > badness) {
  			result = sk;
  			badness = score;
 -			if (score == SCORE2_MAX)
 -				goto exact_match;
 +			reuseport = sk->sk_reuseport;
 +			if (reuseport) {
 +				hash = inet_ehashfn(net, daddr, hnum,
 +						    saddr, htons(sport));
 +				matches = 1;
 +			}
 +		} else if (score == badness && reuseport) {
 +			matches++;
 +			if (((u64)hash * matches) >> 32 == 0)
 +				result = sk;
 +			hash = next_pseudo_random32(hash);
  		}
  	}
  	/*
@@@ -446,7 -431,9 +446,7 @@@
  	 */
  	if (get_nulls_value(node) != slot2)
  		goto begin;
 -
  	if (result) {
 -exact_match:
  		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
  			result = NULL;
  		else if (unlikely(compute_score2(result, net, saddr, sport,
@@@ -470,8 -457,7 +470,8 @@@ struct sock *__udp4_lib_lookup(struct n
  	unsigned short hnum = ntohs(dport);
  	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
  	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 -	int score, badness;
 +	int score, badness, matches = 0, reuseport = 0;
 +	u32 hash = 0;
  
  	rcu_read_lock();
  	if (hslot->count > 10) {
@@@ -500,24 -486,13 +500,24 @@@
  	}
  begin:
  	result = NULL;
 -	badness = -1;
 +	badness = 0;
  	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
  		score = compute_score(sk, net, saddr, hnum, sport,
  				      daddr, dport, dif);
  		if (score > badness) {
  			result = sk;
  			badness = score;
 +			reuseport = sk->sk_reuseport;
 +			if (reuseport) {
 +				hash = inet_ehashfn(net, daddr, hnum,
 +						    saddr, htons(sport));
 +				matches = 1;
 +			}
 +		} else if (score == badness && reuseport) {
 +			matches++;
 +			if (((u64)hash * matches) >> 32 == 0)
 +				result = sk;
 +			hash = next_pseudo_random32(hash);
  		}
  	}
  	/*
@@@ -996,7 -971,7 +996,7 @@@ back_from_confirm
  				  sizeof(struct udphdr), &ipc, &rt,
  				  msg->msg_flags);
  		err = PTR_ERR(skb);
 -		if (skb && !IS_ERR(skb))
 +		if (!IS_ERR_OR_NULL(skb))
  			err = udp_send_skb(skb, fl4);
  		goto out;
  	}
@@@ -1977,6 -1952,7 +1977,7 @@@ struct proto udp_prot = 
  	.recvmsg	   = udp_recvmsg,
  	.sendpage	   = udp_sendpage,
  	.backlog_rcv	   = __udp_queue_rcv_skb,
+ 	.release_cb	   = ip4_datagram_release_cb,
  	.hash		   = udp_lib_hash,
  	.unhash		   = udp_lib_unhash,
  	.rehash		   = udp_v4_rehash,
diff --combined net/ipv6/ip6_output.c
index 7dea45a,0c7c03d..906b7e6
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@@ -56,6 -56,8 +56,6 @@@
  #include <net/checksum.h>
  #include <linux/mroute6.h>
  
 -int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
 -
  int __ip6_local_out(struct sk_buff *skb)
  {
  	int len;
@@@ -86,8 -88,7 +86,8 @@@ static int ip6_finish_output2(struct sk
  	struct dst_entry *dst = skb_dst(skb);
  	struct net_device *dev = dst->dev;
  	struct neighbour *neigh;
 -	struct rt6_info *rt;
 +	struct in6_addr *nexthop;
 +	int ret;
  
  	skb->protocol = htons(ETH_P_IPV6);
  	skb->dev = dev;
@@@ -122,17 -123,10 +122,17 @@@
  				skb->len);
  	}
  
 -	rt = (struct rt6_info *) dst;
 -	neigh = rt->n;
 -	if (neigh)
 -		return dst_neigh_output(dst, neigh, skb);
 +	rcu_read_lock_bh();
 +	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
 +	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 +	if (unlikely(!neigh))
 +		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
 +	if (!IS_ERR(neigh)) {
 +		ret = dst_neigh_output(dst, neigh, skb);
 +		rcu_read_unlock_bh();
 +		return ret;
 +	}
 +	rcu_read_unlock_bh();
  
  	IP6_INC_STATS_BH(dev_net(dst->dev),
  			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@@ -222,7 -216,7 +222,7 @@@ int ip6_xmit(struct sock *sk, struct sk
  	if (hlimit < 0)
  		hlimit = ip6_dst_hoplimit(dst);
  
 -	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
 +	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
  
  	hdr->payload_len = htons(seg_len);
  	hdr->nexthdr = proto;
@@@ -252,6 -246,39 +252,6 @@@
  
  EXPORT_SYMBOL(ip6_xmit);
  
 -/*
 - *	To avoid extra problems ND packets are send through this
 - *	routine. It's code duplication but I really want to avoid
 - *	extra checks since ipv6_build_header is used by TCP (which
 - *	is for us performance critical)
 - */
 -
 -int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 -	       const struct in6_addr *saddr, const struct in6_addr *daddr,
 -	       int proto, int len)
 -{
 -	struct ipv6_pinfo *np = inet6_sk(sk);
 -	struct ipv6hdr *hdr;
 -
 -	skb->protocol = htons(ETH_P_IPV6);
 -	skb->dev = dev;
 -
 -	skb_reset_network_header(skb);
 -	skb_put(skb, sizeof(struct ipv6hdr));
 -	hdr = ipv6_hdr(skb);
 -
 -	*(__be32*)hdr = htonl(0x60000000);
 -
 -	hdr->payload_len = htons(len);
 -	hdr->nexthdr = proto;
 -	hdr->hop_limit = np->hop_limit;
 -
 -	hdr->saddr = *saddr;
 -	hdr->daddr = *daddr;
 -
 -	return 0;
 -}
 -
  static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
  {
  	struct ip6_ra_chain *ra;
@@@ -886,12 -913,8 +886,12 @@@ static int ip6_dst_lookup_tail(struct s
  	 * dst entry of the nexthop router
  	 */
  	rt = (struct rt6_info *) *dst;
 -	n = rt->n;
 -	if (n && !(n->nud_state & NUD_VALID)) {
 +	rcu_read_lock_bh();
 +	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
 +	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
 +	rcu_read_unlock_bh();
 +
 +	if (err) {
  		struct inet6_ifaddr *ifp;
  		struct flowi6 fl_gw6;
  		int redirect;
@@@ -1190,10 -1213,10 +1190,10 @@@ int ip6_append_data(struct sock *sk, in
  		if (dst_allfrag(rt->dst.path))
  			cork->flags |= IPCORK_ALLFRAG;
  		cork->length = 0;
- 		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
+ 		exthdrlen = (opt ? opt->opt_flen : 0);
  		length += exthdrlen;
  		transhdrlen += exthdrlen;
- 		dst_exthdrlen = rt->dst.header_len;
+ 		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
  	} else {
  		rt = (struct rt6_info *)cork->dst;
  		fl6 = &inet->cork.fl.u.ip6;
@@@ -1525,7 -1548,9 +1525,7 @@@ int ip6_push_pending_frames(struct soc
  	skb_reset_network_header(skb);
  	hdr = ipv6_hdr(skb);
  
 -	*(__be32*)hdr = fl6->flowlabel |
 -		     htonl(0x60000000 | ((int)np->cork.tclass << 20));
 -
 +	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
  	hdr->hop_limit = np->cork.hop_limit;
  	hdr->nexthdr = proto;
  	hdr->saddr = fl6->saddr;
diff --combined net/ipv6/ip6mr.c
index acc3249,8fd154e..351ce98
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@@ -1017,50 -1017,6 +1017,50 @@@ static struct mfc6_cache *ip6mr_cache_f
  	return NULL;
  }
  
 +/* Look for a (*,*,oif) entry */
 +static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
 +						      mifi_t mifi)
 +{
 +	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
 +	struct mfc6_cache *c;
 +
 +	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
 +		if (ipv6_addr_any(&c->mf6c_origin) &&
 +		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
 +		    (c->mfc_un.res.ttls[mifi] < 255))
 +			return c;
 +
 +	return NULL;
 +}
 +
 +/* Look for a (*,G) entry */
 +static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
 +					       struct in6_addr *mcastgrp,
 +					       mifi_t mifi)
 +{
 +	int line = MFC6_HASH(mcastgrp, &in6addr_any);
 +	struct mfc6_cache *c, *proxy;
 +
 +	if (ipv6_addr_any(mcastgrp))
 +		goto skip;
 +
 +	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
 +		if (ipv6_addr_any(&c->mf6c_origin) &&
 +		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
 +			if (c->mfc_un.res.ttls[mifi] < 255)
 +				return c;
 +
 +			/* It's ok if the mifi is part of the static tree */
 +			proxy = ip6mr_cache_find_any_parent(mrt,
 +							    c->mf6c_parent);
 +			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
 +				return c;
 +		}
 +
 +skip:
 +	return ip6mr_cache_find_any_parent(mrt, mifi);
 +}
 +
  /*
   *	Allocate a multicast cache entry
   */
@@@ -1291,8 -1247,7 +1291,8 @@@ ip6mr_cache_unresolved(struct mr6_tabl
   *	MFC6 cache manipulation by user space
   */
  
 -static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
 +static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
 +			    int parent)
  {
  	int line;
  	struct mfc6_cache *c, *next;
@@@ -1301,9 -1256,7 +1301,9 @@@
  
  	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
  		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 -		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
 +		    ipv6_addr_equal(&c->mf6c_mcastgrp,
 +				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
 +		    (parent == -1 || parent == c->mf6c_parent)) {
  			write_lock_bh(&mrt_lock);
  			list_del(&c->list);
  			write_unlock_bh(&mrt_lock);
@@@ -1438,7 -1391,7 +1438,7 @@@ void ip6_mr_cleanup(void
  }
  
  static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
 -			 struct mf6cctl *mfc, int mrtsock)
 +			 struct mf6cctl *mfc, int mrtsock, int parent)
  {
  	bool found = false;
  	int line;
@@@ -1460,9 -1413,7 +1460,9 @@@
  
  	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
  		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
 -		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
 +		    ipv6_addr_equal(&c->mf6c_mcastgrp,
 +				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
 +		    (parent == -1 || parent == mfc->mf6cc_parent)) {
  			found = true;
  			break;
  		}
@@@ -1479,8 -1430,7 +1479,8 @@@
  		return 0;
  	}
  
 -	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
 +	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
 +	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
  		return -EINVAL;
  
  	c = ip6mr_cache_alloc();
@@@ -1646,7 -1596,7 +1646,7 @@@ struct sock *mroute6_socket(struct net 
  
  int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
  {
 -	int ret;
 +	int ret, parent = 0;
  	struct mif6ctl vif;
  	struct mf6cctl mfc;
  	mifi_t mifi;
@@@ -1703,21 -1653,15 +1703,21 @@@
  	 */
  	case MRT6_ADD_MFC:
  	case MRT6_DEL_MFC:
 +		parent = -1;
 +	case MRT6_ADD_MFC_PROXY:
 +	case MRT6_DEL_MFC_PROXY:
  		if (optlen < sizeof(mfc))
  			return -EINVAL;
  		if (copy_from_user(&mfc, optval, sizeof(mfc)))
  			return -EFAULT;
 +		if (parent == 0)
 +			parent = mfc.mf6cc_parent;
  		rtnl_lock();
 -		if (optname == MRT6_DEL_MFC)
 -			ret = ip6mr_mfc_delete(mrt, &mfc);
 +		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
 +			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
  		else
 -			ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
 +			ret = ip6mr_mfc_add(net, mrt, &mfc,
 +					    sk == mrt->mroute6_sk, parent);
  		rtnl_unlock();
  		return ret;
  
@@@ -1766,6 -1710,9 +1766,9 @@@
  			return -EINVAL;
  		if (get_user(v, (u32 __user *)optval))
  			return -EFAULT;
+ 		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ 		if (v != RT_TABLE_DEFAULT && v >= 100000000)
+ 			return -EINVAL;
  		if (sk == mrt->mroute6_sk)
  			return -EBUSY;
  
@@@ -2071,29 -2018,19 +2074,29 @@@ static int ip6_mr_forward(struct net *n
  {
  	int psend = -1;
  	int vif, ct;
 +	int true_vifi = ip6mr_find_vif(mrt, skb->dev);
  
  	vif = cache->mf6c_parent;
  	cache->mfc_un.res.pkt++;
  	cache->mfc_un.res.bytes += skb->len;
  
 +	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
 +		struct mfc6_cache *cache_proxy;
 +
 +		/* For an (*,G) entry, we only check that the incomming
 +		 * interface is part of the static tree.
 +		 */
 +		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
 +		if (cache_proxy &&
 +		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
 +			goto forward;
 +	}
 +
  	/*
  	 * Wrong interface: drop packet and (maybe) send PIM assert.
  	 */
  	if (mrt->vif6_table[vif].dev != skb->dev) {
 -		int true_vifi;
 -
  		cache->mfc_un.res.wrong_if++;
 -		true_vifi = ip6mr_find_vif(mrt, skb->dev);
  
  		if (true_vifi >= 0 && mrt->mroute_do_assert &&
  		    /* pimsm uses asserts, when switching from RPT to SPT,
@@@ -2111,32 -2048,14 +2114,32 @@@
  		goto dont_forward;
  	}
  
 +forward:
  	mrt->vif6_table[vif].pkt_in++;
  	mrt->vif6_table[vif].bytes_in += skb->len;
  
  	/*
  	 *	Forward the frame
  	 */
 +	if (ipv6_addr_any(&cache->mf6c_origin) &&
 +	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
 +		if (true_vifi >= 0 &&
 +		    true_vifi != cache->mf6c_parent &&
 +		    ipv6_hdr(skb)->hop_limit >
 +				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
 +			/* It's an (*,*) entry and the packet is not coming from
 +			 * the upstream: forward the packet to the upstream
 +			 * only.
 +			 */
 +			psend = cache->mf6c_parent;
 +			goto last_forward;
 +		}
 +		goto dont_forward;
 +	}
  	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
 -		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
 +		/* For (*,G) entry, don't forward to the incoming interface */
 +		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
 +		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
  			if (psend != -1) {
  				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  				if (skb2)
@@@ -2145,7 -2064,6 +2148,7 @@@
  			psend = ct;
  		}
  	}
 +last_forward:
  	if (psend != -1) {
  		ip6mr_forward2(net, mrt, skb, cache, psend);
  		return 0;
@@@ -2181,14 -2099,6 +2184,14 @@@ int ip6_mr_input(struct sk_buff *skb
  	read_lock(&mrt_lock);
  	cache = ip6mr_cache_find(mrt,
  				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
 +	if (cache == NULL) {
 +		int vif = ip6mr_find_vif(mrt, skb->dev);
 +
 +		if (vif >= 0)
 +			cache = ip6mr_cache_find_any(mrt,
 +						     &ipv6_hdr(skb)->daddr,
 +						     vif);
 +	}
  
  	/*
  	 *	No usable cache entry
@@@ -2276,13 -2186,6 +2279,13 @@@ int ip6mr_get_route(struct net *net
  
  	read_lock(&mrt_lock);
  	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
 +	if (!cache && skb->dev) {
 +		int vif = ip6mr_find_vif(mrt, skb->dev);
 +
 +		if (vif >= 0)
 +			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
 +						     vif);
 +	}
  
  	if (!cache) {
  		struct sk_buff *skb2;

-- 
LinuxNextTracking


More information about the linux-merge mailing list