[linux-next] LinuxNextTracking branch, master, updated. next-20140723

batman at open-mesh.org
Thu Jul 24 00:22:09 CEST 2014


The following commit has been merged into the master branch:
commit 8fd90bb889635fa1e7f80a3950948cc2e74c1446
Merge: 1bb4238b17b5de6cdc120970a9d00dd8a44f40df 15ba2236f3556fc01b9ca91394465152b5ea74b6
Author: David S. Miller <davem at davemloft.net>
Date:   Tue Jul 22 00:44:59 2014 -0700

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    Conflicts:
    	drivers/infiniband/hw/cxgb4/device.c
    
    The cxgb4 conflict was simply overlapping changes.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>
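
    As a rough illustration of the workflow the message above describes (a
    pull of the net tree into net-next, with one overlapping-change conflict
    resolved by hand), the steps would look roughly like the sketch below.
    The commands are a hedged assumption, not taken from this commit; only
    the remote URL and the conflicting file are named in the merge message.

        git checkout master
        git pull git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
        # the pull stops with a content conflict in
        # drivers/infiniband/hw/cxgb4/device.c (overlapping changes)
        $EDITOR drivers/infiniband/hw/cxgb4/device.c   # resolve, keeping both sides
        git add drivers/infiniband/hw/cxgb4/device.c
        git commit    # records the merge; the combined diff follows below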

diff --combined MAINTAINERS
index ae8cd00,d76e077..78215a5
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1925,8 -1925,7 +1925,8 @@@ S:	Supporte
  F:	drivers/net/ethernet/broadcom/genet/
  
  BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 -M:	Michael Chan <mchan at broadcom.com>
 +M:	Sony Chacko <sony.chacko at qlogic.com>
 +M:	Dept-HSGLinuxNICDev at qlogic.com
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	drivers/net/ethernet/broadcom/bnx2.*
@@@ -1971,7 -1970,7 +1971,7 @@@ F:	arch/arm/boot/dts/bcm5301x.dts
  F:	arch/arm/boot/dts/bcm470*
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
 -M:	Nithin Nayak Sujir <nsujir at broadcom.com>
 +M:	Prashant Sreedharan <prashant at broadcom.com>
  M:	Michael Chan <mchan at broadcom.com>
  L:	netdev at vger.kernel.org
  S:	Supported
@@@ -5656,6 -5655,16 +5656,6 @@@ F:	Documentation/networking/mac80211-in
  F:	include/net/mac80211.h
  F:	net/mac80211/
  
 -MAC80211 PID RATE CONTROL
 -M:	Stefano Brivio <stefano.brivio at polimi.it>
 -M:	Mattias Nissler <mattias.nissler at gmx.de>
 -L:	linux-wireless at vger.kernel.org
 -W:	http://wireless.kernel.org/en/developers/Documentation/mac80211/RateControl/PID
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 -S:	Maintained
 -F:	net/mac80211/rc80211_pid*
 -
  MACVLAN DRIVER
  M:	Patrick McHardy <kaber at trash.net>
  L:	netdev at vger.kernel.org
@@@ -8010,6 -8019,16 +8010,16 @@@ F:	drivers/ata
  F:	include/linux/ata.h
  F:	include/linux/libata.h
  
+ SERIAL ATA AHCI PLATFORM devices support
+ M:	Hans de Goede <hdegoede at redhat.com>
+ M:	Tejun Heo <tj at kernel.org>
+ L:	linux-ide at vger.kernel.org
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+ S:	Supported
+ F:	drivers/ata/ahci_platform.c
+ F:	drivers/ata/libahci_platform.c
+ F:	include/linux/ahci_platform.h
+ 
  SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
  M:	Jayamohan Kallickal <jayamohan.kallickal at emulex.com>
  L:	linux-scsi at vger.kernel.org
diff --combined drivers/infiniband/hw/cxgb4/cm.c
index 6d61a16,768a0fb..c2fb71c
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@@ -79,10 -79,9 +79,10 @@@ static int dack_mode = 1
  module_param(dack_mode, int, 0644);
  MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
  
 -int c4iw_max_read_depth = 8;
 +uint c4iw_max_read_depth = 32;
  module_param(c4iw_max_read_depth, int, 0644);
 -MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
 +MODULE_PARM_DESC(c4iw_max_read_depth,
 +		 "Per-connection max ORD/IRD (default=32)");
  
  static int enable_tcp_timestamps;
  module_param(enable_tcp_timestamps, int, 0644);
@@@ -433,8 -432,17 +433,17 @@@ static void arp_failure_discard(void *h
   */
  static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
  {
+ 	struct c4iw_ep *ep = handle;
+ 
  	printk(KERN_ERR MOD "ARP failure duing connect\n");
  	kfree_skb(skb);
+ 	connect_reply_upcall(ep, -EHOSTUNREACH);
+ 	state_set(&ep->com, DEAD);
+ 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+ 	dst_release(ep->dst);
+ 	cxgb4_l2t_release(ep->l2t);
+ 	c4iw_put_ep(&ep->com);
  }
  
  /*
@@@ -466,8 -474,7 +475,8 @@@ static void send_flowc(struct c4iw_ep *
  					  16)) | FW_WR_FLOWID(ep->hwtid));
  
  	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 -	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
 +	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
 +					    (ep->com.dev->rdev.lldi.pf));
  	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
  	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
  	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
@@@ -660,7 -667,7 +669,7 @@@ static int send_connect(struct c4iw_ep 
  		opt2 |= T5_OPT_2_VALID;
  		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
  	}
- 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+ 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
  
  	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
  		if (ep->com.remote_addr.ss_family == AF_INET) {
@@@ -814,8 -821,6 +823,8 @@@ static void send_mpa_req(struct c4iw_e
  	if (mpa_rev_to_use == 2) {
  		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  					       sizeof (struct mpa_v2_conn_params));
 +		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
 +		     ep->ord);
  		mpa_v2_params.ird = htons((u16)ep->ird);
  		mpa_v2_params.ord = htons((u16)ep->ord);
  
@@@ -1185,8 -1190,8 +1194,8 @@@ static int connect_request_upcall(struc
  			sizeof(struct mpa_v2_conn_params);
  	} else {
  		/* this means MPA_v1 is used. Send max supported */
 -		event.ord = c4iw_max_read_depth;
 -		event.ird = c4iw_max_read_depth;
 +		event.ord = cur_max_read_depth(ep->com.dev);
 +		event.ird = cur_max_read_depth(ep->com.dev);
  		event.private_data_len = ep->plen;
  		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
  	}
@@@ -1250,8 -1255,6 +1259,8 @@@ static int update_rx_credits(struct c4i
  	return credits;
  }
  
 +#define RELAXED_IRD_NEGOTIATION 1
 +
  static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
  {
  	struct mpa_message *mpa;
@@@ -1363,33 -1366,17 +1372,33 @@@
  				MPA_V2_IRD_ORD_MASK;
  			resp_ord = ntohs(mpa_v2_params->ord) &
  				MPA_V2_IRD_ORD_MASK;
 +			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
 +			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);
  
  			/*
  			 * This is a double-check. Ideally, below checks are
  			 * not required since ird/ord stuff has been taken
  			 * care of in c4iw_accept_cr
  			 */
 -			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
 +			if (ep->ird < resp_ord) {
 +				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
 +				    ep->com.dev->rdev.lldi.max_ordird_qp)
 +					ep->ird = resp_ord;
 +				else
 +					insuff_ird = 1;
 +			} else if (ep->ird > resp_ord) {
 +				ep->ird = resp_ord;
 +			}
 +			if (ep->ord > resp_ird) {
 +				if (RELAXED_IRD_NEGOTIATION)
 +					ep->ord = resp_ird;
 +				else
 +					insuff_ird = 1;
 +			}
 +			if (insuff_ird) {
  				err = -ENOMEM;
  				ep->ird = resp_ord;
  				ep->ord = resp_ird;
 -				insuff_ird = 1;
  			}
  
  			if (ntohs(mpa_v2_params->ird) &
@@@ -1592,8 -1579,6 +1601,8 @@@ static void process_mpa_request(struct 
  				MPA_V2_IRD_ORD_MASK;
  			ep->ord = ntohs(mpa_v2_params->ord) &
  				MPA_V2_IRD_ORD_MASK;
 +			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
 +			     ep->ord);
  			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
  				if (peer2peer) {
  					if (ntohs(mpa_v2_params->ord) &
@@@ -1813,20 -1798,6 +1822,20 @@@ static int is_neg_adv(unsigned int stat
  	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
  }
  
 +static char *neg_adv_str(unsigned int status)
 +{
 +	switch (status) {
 +	case CPL_ERR_RTX_NEG_ADVICE:
 +		return "Retransmit timeout";
 +	case CPL_ERR_PERSIST_NEG_ADVICE:
 +		return "Persist timeout";
 +	case CPL_ERR_KEEPALV_NEG_ADVICE:
 +		return "Keepalive timeout";
 +	default:
 +		return "Unknown";
 +	}
 +}
 +
  static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
  {
  	ep->snd_win = snd_win;
@@@ -2025,9 -1996,8 +2034,9 @@@ static int act_open_rpl(struct c4iw_de
  	     status, status2errno(status));
  
  	if (is_neg_adv(status)) {
 -		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
 -			atid);
 +		dev_warn(&dev->rdev.lldi.pdev->dev,
 +			 "Connection problems for atid %u status %u (%s)\n",
 +			 atid, status, neg_adv_str(status));
  		return 0;
  	}
  
@@@ -2219,7 -2189,6 +2228,6 @@@ static void reject_cr(struct c4iw_dev *
  	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
  	BUG_ON(skb_cloned(skb));
  	skb_trim(skb, sizeof(struct cpl_tid_release));
- 	skb_get(skb);
  	release_tid(&dev->rdev, hwtid, skb);
  	return;
  }
@@@ -2503,9 -2472,8 +2511,9 @@@ static int peer_abort(struct c4iw_dev *
  
  	ep = lookup_tid(t, tid);
  	if (is_neg_adv(req->status)) {
 -		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 -		     ep->hwtid);
 +		dev_warn(&dev->rdev.lldi.pdev->dev,
 +			 "Negative advice on abort - tid %u status %d (%s)\n",
 +			 ep->hwtid, req->status, neg_adv_str(req->status));
  		return 0;
  	}
  	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@@ -2763,8 -2731,8 +2771,8 @@@ int c4iw_accept_cr(struct iw_cm_id *cm_
  	BUG_ON(!qp);
  
  	set_bit(ULP_ACCEPT, &ep->com.history);
 -	if ((conn_param->ord > c4iw_max_read_depth) ||
 -	    (conn_param->ird > c4iw_max_read_depth)) {
 +	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
 +	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
  		abort_connection(ep, NULL, GFP_KERNEL);
  		err = -EINVAL;
  		goto err;
@@@ -2772,41 -2740,31 +2780,41 @@@
  
  	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
  		if (conn_param->ord > ep->ird) {
 -			ep->ird = conn_param->ird;
 -			ep->ord = conn_param->ord;
 -			send_mpa_reject(ep, conn_param->private_data,
 -					conn_param->private_data_len);
 -			abort_connection(ep, NULL, GFP_KERNEL);
 -			err = -ENOMEM;
 -			goto err;
 +			if (RELAXED_IRD_NEGOTIATION) {
 +				ep->ord = ep->ird;
 +			} else {
 +				ep->ird = conn_param->ird;
 +				ep->ord = conn_param->ord;
 +				send_mpa_reject(ep, conn_param->private_data,
 +						conn_param->private_data_len);
 +				abort_connection(ep, NULL, GFP_KERNEL);
 +				err = -ENOMEM;
 +				goto err;
 +			}
  		}
 -		if (conn_param->ird > ep->ord) {
 -			if (!ep->ord)
 -				conn_param->ird = 1;
 -			else {
 +		if (conn_param->ird < ep->ord) {
 +			if (RELAXED_IRD_NEGOTIATION &&
 +			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
 +				conn_param->ird = ep->ord;
 +			} else {
  				abort_connection(ep, NULL, GFP_KERNEL);
  				err = -ENOMEM;
  				goto err;
  			}
  		}
 -
  	}
  	ep->ird = conn_param->ird;
  	ep->ord = conn_param->ord;
  
 -	if (ep->mpa_attr.version != 2)
 +	if (ep->mpa_attr.version == 1) {
  		if (peer2peer && ep->ird == 0)
  			ep->ird = 1;
 +	} else {
 +		if (peer2peer &&
 +		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
 +		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
 +			ep->ird = 1;
 +	}
  
  	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
  
@@@ -2845,7 -2803,6 +2853,7 @@@
  	return 0;
  err1:
  	ep->com.cm_id = NULL;
 +	abort_connection(ep, NULL, GFP_KERNEL);
  	cm_id->rem_ref(cm_id);
  err:
  	mutex_unlock(&ep->com.mutex);
@@@ -2929,8 -2886,8 +2937,8 @@@ int c4iw_connect(struct iw_cm_id *cm_id
  	int iptype;
  	int iwpm_err = 0;
  
 -	if ((conn_param->ord > c4iw_max_read_depth) ||
 -	    (conn_param->ird > c4iw_max_read_depth)) {
 +	if ((conn_param->ord > cur_max_read_depth(dev)) ||
 +	    (conn_param->ird > cur_max_read_depth(dev))) {
  		err = -EINVAL;
  		goto out;
  	}
@@@ -3910,9 -3867,8 +3918,9 @@@ static int peer_abort_intr(struct c4iw_
  		return 0;
  	}
  	if (is_neg_adv(req->status)) {
 -		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 -		     ep->hwtid);
 +		dev_warn(&dev->rdev.lldi.pdev->dev,
 +			 "Negative advice on abort - tid %u status %d (%s)\n",
 +			 ep->hwtid, req->status, neg_adv_str(req->status));
  		kfree_skb(skb);
  		return 0;
  	}
@@@ -3969,7 -3925,7 +3977,7 @@@ int __init c4iw_cm_init(void
  	return 0;
  }
  
- void __exit c4iw_cm_term(void)
+ void c4iw_cm_term(void)
  {
  	WARN_ON(!list_empty(&timeout_list));
  	flush_workqueue(workq);
diff --combined drivers/infiniband/hw/cxgb4/device.c
index bda9492,7db82b2..f25df52
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@@ -33,7 -33,6 +33,7 @@@
  #include <linux/moduleparam.h>
  #include <linux/debugfs.h>
  #include <linux/vmalloc.h>
 +#include <linux/math64.h>
  
  #include <rdma/ib_verbs.h>
  
@@@ -56,15 -55,6 +56,15 @@@ module_param(allow_db_coalescing_on_t5
  MODULE_PARM_DESC(allow_db_coalescing_on_t5,
  		 "Allow DB Coalescing on T5 (default = 0)");
  
 +int c4iw_wr_log = 0;
 +module_param(c4iw_wr_log, int, 0444);
 +MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
 +
 +int c4iw_wr_log_size_order = 12;
 +module_param(c4iw_wr_log_size_order, int, 0444);
 +MODULE_PARM_DESC(c4iw_wr_log_size_order,
 +		 "Number of entries (log2) in the work request timing log.");
 +
  struct uld_ctx {
  	struct list_head entry;
  	struct cxgb4_lld_info lldi;
@@@ -113,117 -103,6 +113,117 @@@ static ssize_t debugfs_read(struct fil
  	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
  }
  
 +void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
 +{
 +	struct wr_log_entry le;
 +	int idx;
 +
 +	if (!wq->rdev->wr_log)
 +		return;
 +
 +	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
 +		(wq->rdev->wr_log_size - 1);
 +	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
 +	getnstimeofday(&le.poll_host_ts);
 +	le.valid = 1;
 +	le.cqe_sge_ts = CQE_TS(cqe);
 +	if (SQ_TYPE(cqe)) {
 +		le.qid = wq->sq.qid;
 +		le.opcode = CQE_OPCODE(cqe);
 +		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
 +		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
 +		le.wr_id = CQE_WRID_SQ_IDX(cqe);
 +	} else {
 +		le.qid = wq->rq.qid;
 +		le.opcode = FW_RI_RECEIVE;
 +		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
 +		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
 +		le.wr_id = CQE_WRID_MSN(cqe);
 +	}
 +	wq->rdev->wr_log[idx] = le;
 +}
 +
 +static int wr_log_show(struct seq_file *seq, void *v)
 +{
 +	struct c4iw_dev *dev = seq->private;
 +	struct timespec prev_ts = {0, 0};
 +	struct wr_log_entry *lep;
 +	int prev_ts_set = 0;
 +	int idx, end;
 +
 +#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
 +
 +	idx = atomic_read(&dev->rdev.wr_log_idx) &
 +		(dev->rdev.wr_log_size - 1);
 +	end = idx - 1;
 +	if (end < 0)
 +		end = dev->rdev.wr_log_size - 1;
 +	lep = &dev->rdev.wr_log[idx];
 +	while (idx != end) {
 +		if (lep->valid) {
 +			if (!prev_ts_set) {
 +				prev_ts_set = 1;
 +				prev_ts = lep->poll_host_ts;
 +			}
 +			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
 +				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
 +				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
 +				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
 +				   "cqe_poll_delta_ns %llu\n",
 +				   idx,
 +				   timespec_sub(lep->poll_host_ts,
 +						prev_ts).tv_sec,
 +				   timespec_sub(lep->poll_host_ts,
 +						prev_ts).tv_nsec,
 +				   lep->qid, lep->opcode,
 +				   lep->opcode == FW_RI_RECEIVE ?
 +							"msn" : "wrid",
 +				   lep->wr_id,
 +				   timespec_sub(lep->poll_host_ts,
 +						lep->post_host_ts).tv_sec,
 +				   timespec_sub(lep->poll_host_ts,
 +						lep->post_host_ts).tv_nsec,
 +				   lep->post_sge_ts, lep->cqe_sge_ts,
 +				   lep->poll_sge_ts,
 +				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
 +				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
 +			prev_ts = lep->poll_host_ts;
 +		}
 +		idx++;
 +		if (idx > (dev->rdev.wr_log_size - 1))
 +			idx = 0;
 +		lep = &dev->rdev.wr_log[idx];
 +	}
 +#undef ts2ns
 +	return 0;
 +}
 +
 +static int wr_log_open(struct inode *inode, struct file *file)
 +{
 +	return single_open(file, wr_log_show, inode->i_private);
 +}
 +
 +static ssize_t wr_log_clear(struct file *file, const char __user *buf,
 +			    size_t count, loff_t *pos)
 +{
 +	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
 +	int i;
 +
 +	if (dev->rdev.wr_log)
 +		for (i = 0; i < dev->rdev.wr_log_size; i++)
 +			dev->rdev.wr_log[i].valid = 0;
 +	return count;
 +}
 +
 +static const struct file_operations wr_log_debugfs_fops = {
 +	.owner   = THIS_MODULE,
 +	.open    = wr_log_open,
 +	.release = single_release,
 +	.read    = seq_read,
 +	.llseek  = seq_lseek,
 +	.write   = wr_log_clear,
 +};
 +
  static int dump_qp(int id, void *p, void *data)
  {
  	struct c4iw_qp *qp = p;
@@@ -362,32 -241,12 +362,32 @@@ static int dump_stag(int id, void *p, v
  	struct c4iw_debugfs_data *stagd = data;
  	int space;
  	int cc;
 +	struct fw_ri_tpte tpte;
 +	int ret;
  
  	space = stagd->bufsize - stagd->pos - 1;
  	if (space == 0)
  		return 1;
  
 -	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
 +	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
 +			      (__be32 *)&tpte);
 +	if (ret) {
 +		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
 +			"%s cxgb4_read_tpte err %d\n", __func__, ret);
 +		return ret;
 +	}
 +	cc = snprintf(stagd->buf + stagd->pos, space,
 +		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
 +		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
 +		      (u32)id<<8,
 +		      G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
 +		      G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
 +		      G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
 +		      G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
 +		      G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
 +		      G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
 +		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
 +		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
  	if (cc < space)
  		stagd->pos += cc;
  	return 0;
@@@ -400,7 -259,7 +400,7 @@@ static int stag_release(struct inode *i
  		printk(KERN_INFO "%s null stagd?\n", __func__);
  		return 0;
  	}
 -	kfree(stagd->buf);
 +	vfree(stagd->buf);
  	kfree(stagd);
  	return 0;
  }
@@@ -423,8 -282,8 +423,8 @@@ static int stag_open(struct inode *inod
  	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
  	spin_unlock_irq(&stagd->devp->lock);
  
 -	stagd->bufsize = count * sizeof("0x12345678\n");
 -	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
 +	stagd->bufsize = count * 256;
 +	stagd->buf = vmalloc(stagd->bufsize);
  	if (!stagd->buf) {
  		ret = -ENOMEM;
  		goto err1;
@@@ -489,7 -348,6 +489,7 @@@ static int stats_show(struct seq_file *
  		   dev->rdev.stats.act_ofld_conn_fails);
  	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
  		   dev->rdev.stats.pas_ofld_conn_fails);
 +	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
  	return 0;
  }
  
@@@ -725,12 -583,6 +725,12 @@@ static int setup_debugfs(struct c4iw_de
  	if (de && de->d_inode)
  		de->d_inode->i_size = 4096;
  
 +	if (c4iw_wr_log) {
 +		de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root,
 +					 (void *)devp, &wr_log_debugfs_fops);
 +		if (de && de->d_inode)
 +			de->d_inode->i_size = 4096;
 +	}
  	return 0;
  }
  
@@@ -844,16 -696,7 +844,20 @@@ static int c4iw_rdev_open(struct c4iw_r
  		pr_err(MOD "error allocating status page\n");
  		goto err4;
  	}
++
 +	if (c4iw_wr_log) {
 +		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
 +				       sizeof(*rdev->wr_log), GFP_KERNEL);
 +		if (rdev->wr_log) {
 +			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
 +			atomic_set(&rdev->wr_log_idx, 0);
 +		} else {
 +			pr_err(MOD "error allocating wr_log. Logging disabled\n");
 +		}
 +	}
++
+ 	rdev->status_page->db_off = 0;
++
  	return 0;
  err4:
  	c4iw_rqtpool_destroy(rdev);
@@@ -867,7 -710,6 +871,7 @@@ err1
  
  static void c4iw_rdev_close(struct c4iw_rdev *rdev)
  {
 +	kfree(rdev->wr_log);
  	free_page((unsigned long)rdev->status_page);
  	c4iw_pblpool_destroy(rdev);
  	c4iw_rqtpool_destroy(rdev);
@@@ -888,7 -730,6 +892,6 @@@ static void c4iw_dealloc(struct uld_ct
  	if (ctx->dev->rdev.oc_mw_kva)
  		iounmap(ctx->dev->rdev.oc_mw_kva);
  	ib_dealloc_device(&ctx->dev->ibdev);
- 	iwpm_exit(RDMA_NL_C4IW);
  	ctx->dev = NULL;
  }
  
@@@ -927,27 -768,6 +930,27 @@@ static struct c4iw_dev *c4iw_alloc(cons
  	}
  	devp->rdev.lldi = *infop;
  
 +	/* init various hw-queue params based on lld info */
 +	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
 +	     __func__, devp->rdev.lldi.sge_ingpadboundary,
 +	     devp->rdev.lldi.sge_egrstatuspagesize);
 +
 +	devp->rdev.hw_queue.t4_eq_status_entries =
 +		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
 +	devp->rdev.hw_queue.t4_max_eq_size = 65520;
 +	devp->rdev.hw_queue.t4_max_iq_size = 65520;
 +	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
 +		devp->rdev.hw_queue.t4_eq_status_entries - 1;
 +	devp->rdev.hw_queue.t4_max_sq_size =
 +		devp->rdev.hw_queue.t4_max_eq_size -
 +		devp->rdev.hw_queue.t4_eq_status_entries - 1;
 +	devp->rdev.hw_queue.t4_max_qp_depth =
 +		devp->rdev.hw_queue.t4_max_rq_size;
 +	devp->rdev.hw_queue.t4_max_cq_depth =
 +		devp->rdev.hw_queue.t4_max_iq_size - 2;
 +	devp->rdev.hw_queue.t4_stat_len =
 +		devp->rdev.lldi.sge_egrstatuspagesize;
 +
  	/*
  	 * For T5 devices, we map all of BAR2 with WC.
  	 * For T4 devices with onchip qp mem, we map only that part
@@@ -998,7 -818,6 +1001,7 @@@
  	mutex_init(&devp->rdev.stats.lock);
  	mutex_init(&devp->db_mutex);
  	INIT_LIST_HEAD(&devp->db_fc_list);
 +	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
  
  	if (c4iw_debugfs_root) {
  		devp->debugfs_root = debugfs_create_dir(
@@@ -1007,12 -826,6 +1010,6 @@@
  		setup_debugfs(devp);
  	}
  
- 	ret = iwpm_init(RDMA_NL_C4IW);
- 	if (ret) {
- 		pr_err("port mapper initialization failed with %d\n", ret);
- 		ib_dealloc_device(&devp->ibdev);
- 		return ERR_PTR(ret);
- 	}
  
  	return devp;
  }
@@@ -1513,6 -1326,15 +1510,15 @@@ static int __init c4iw_init_module(void
  		pr_err("%s[%u]: Failed to add netlink callback\n"
  		       , __func__, __LINE__);
  
+ 	err = iwpm_init(RDMA_NL_C4IW);
+ 	if (err) {
+ 		pr_err("port mapper initialization failed with %d\n", err);
+ 		ibnl_remove_client(RDMA_NL_C4IW);
+ 		c4iw_cm_term();
+ 		debugfs_remove_recursive(c4iw_debugfs_root);
+ 		return err;
+ 	}
+ 
  	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
  
  	return 0;
@@@ -1530,6 -1352,7 +1536,7 @@@ static void __exit c4iw_exit_module(voi
  	}
  	mutex_unlock(&dev_mutex);
  	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+ 	iwpm_exit(RDMA_NL_C4IW);
  	ibnl_remove_client(RDMA_NL_C4IW);
  	c4iw_cm_term();
  	debugfs_remove_recursive(c4iw_debugfs_root);
diff --combined drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index c378fd2,361fff7..b5678ac
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@@ -139,29 -139,6 +139,29 @@@ struct c4iw_stats 
  	u64  pas_ofld_conn_fails;
  };
  
 +struct c4iw_hw_queue {
 +	int t4_eq_status_entries;
 +	int t4_max_eq_size;
 +	int t4_max_iq_size;
 +	int t4_max_rq_size;
 +	int t4_max_sq_size;
 +	int t4_max_qp_depth;
 +	int t4_max_cq_depth;
 +	int t4_stat_len;
 +};
 +
 +struct wr_log_entry {
 +	struct timespec post_host_ts;
 +	struct timespec poll_host_ts;
 +	u64 post_sge_ts;
 +	u64 cqe_sge_ts;
 +	u64 poll_sge_ts;
 +	u16 qid;
 +	u16 wr_id;
 +	u8 opcode;
 +	u8 valid;
 +};
 +
  struct c4iw_rdev {
  	struct c4iw_resource resource;
  	unsigned long qpshift;
@@@ -179,11 -156,7 +179,11 @@@
  	unsigned long oc_mw_pa;
  	void __iomem *oc_mw_kva;
  	struct c4iw_stats stats;
 +	struct c4iw_hw_queue hw_queue;
  	struct t4_dev_status_page *status_page;
 +	atomic_t wr_log_idx;
 +	struct wr_log_entry *wr_log;
 +	int wr_log_size;
  };
  
  static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@@ -193,7 -166,7 +193,7 @@@
  
  static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
  {
 -	return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
 +	return (int)(rdev->lldi.vr->stag.size >> 5);
  }
  
  #define C4IW_WR_TO (30*HZ)
@@@ -264,7 -237,6 +264,7 @@@ struct c4iw_dev 
  	struct idr atid_idr;
  	struct idr stid_idr;
  	struct list_head db_fc_list;
 +	u32 avail_ird;
  };
  
  static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@@ -346,13 -318,6 +346,13 @@@ static inline void remove_handle_nolock
  	_remove_handle(rhp, idr, id, 0);
  }
  
 +extern uint c4iw_max_read_depth;
 +
 +static inline int cur_max_read_depth(struct c4iw_dev *dev)
 +{
 +	return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
 +}
 +
  struct c4iw_pd {
  	struct ib_pd ibpd;
  	u32 pdid;
@@@ -943,7 -908,7 +943,7 @@@ int c4iw_destroy_ctrl_qp(struct c4iw_rd
  int c4iw_register_device(struct c4iw_dev *dev);
  void c4iw_unregister_device(struct c4iw_dev *dev);
  int __init c4iw_cm_init(void);
- void __exit c4iw_cm_term(void);
+ void c4iw_cm_term(void);
  void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
  			       struct c4iw_dev_ucontext *uctx);
  void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
@@@ -1026,8 -991,7 +1026,8 @@@ void c4iw_ev_dispatch(struct c4iw_dev *
  
  extern struct cxgb4_client t4c_client;
  extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 -extern int c4iw_max_read_depth;
 +extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
 +extern int c4iw_wr_log;
  extern int db_fc_threshold;
  extern int db_coalescing_threshold;
  extern int use_dsgl;
diff --combined drivers/net/ethernet/realtek/r8169.c
index 6175bd5,61623e9..9887bcb
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@@ -27,8 -27,6 +27,8 @@@
  #include <linux/firmware.h>
  #include <linux/pci-aspm.h>
  #include <linux/prefetch.h>
 +#include <linux/ipv6.h>
 +#include <net/ip6_checksum.h>
  
  #include <asm/io.h>
  #include <asm/irq.h>
@@@ -629,22 -627,39 +629,22 @@@ enum rtl_tx_desc_bit_0 
  
  /* 8102e, 8168c and beyond. */
  enum rtl_tx_desc_bit_1 {
 +	/* First doubleword. */
 +	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
 +	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
 +#define GTTCPHO_SHIFT			18
 +#define GTTCPHO_MAX			0x7fU
 +
  	/* Second doubleword. */
 +#define TCPHO_SHIFT			18
 +#define TCPHO_MAX			0x3ffU
  #define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
 -	TD1_IP_CS	= (1 << 29),		/* Calculate IP checksum */
 +	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
 +	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
  	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
  	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
  };
  
 -static const struct rtl_tx_desc_info {
 -	struct {
 -		u32 udp;
 -		u32 tcp;
 -	} checksum;
 -	u16 mss_shift;
 -	u16 opts_offset;
 -} tx_desc_info [] = {
 -	[RTL_TD_0] = {
 -		.checksum = {
 -			.udp	= TD0_IP_CS | TD0_UDP_CS,
 -			.tcp	= TD0_IP_CS | TD0_TCP_CS
 -		},
 -		.mss_shift	= TD0_MSS_SHIFT,
 -		.opts_offset	= 0
 -	},
 -	[RTL_TD_1] = {
 -		.checksum = {
 -			.udp	= TD1_IP_CS | TD1_UDP_CS,
 -			.tcp	= TD1_IP_CS | TD1_TCP_CS
 -		},
 -		.mss_shift	= TD1_MSS_SHIFT,
 -		.opts_offset	= 1
 -	}
 -};
 -
  enum rtl_rx_desc_bit {
  	/* Rx private */
  	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
@@@ -768,7 -783,6 +768,7 @@@ struct rtl8169_private 
  	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
  	unsigned int (*link_ok)(void __iomem *);
  	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
 +	bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
  
  	struct {
  		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
@@@ -4226,6 -4240,8 +4226,8 @@@ static void rtl_init_rxcfg(struct rtl81
  		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
  		break;
  	case RTL_GIGA_MAC_VER_40:
+ 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ 		break;
  	case RTL_GIGA_MAC_VER_41:
  	case RTL_GIGA_MAC_VER_42:
  	case RTL_GIGA_MAC_VER_43:
@@@ -5952,179 -5968,32 +5954,179 @@@ static bool rtl_test_hw_pad_bug(struct 
  	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
  }
  
 -static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
 -				    struct sk_buff *skb, u32 *opts)
 +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 +				      struct net_device *dev);
 +/* r8169_csum_workaround()
 + * The hw limites the value the transport offset. When the offset is out of the
 + * range, calculate the checksum by sw.
 + */
 +static void r8169_csum_workaround(struct rtl8169_private *tp,
 +				  struct sk_buff *skb)
 +{
 +	if (skb_shinfo(skb)->gso_size) {
 +		netdev_features_t features = tp->dev->features;
 +		struct sk_buff *segs, *nskb;
 +
 +		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
 +		segs = skb_gso_segment(skb, features);
 +		if (IS_ERR(segs) || !segs)
 +			goto drop;
 +
 +		do {
 +			nskb = segs;
 +			segs = segs->next;
 +			nskb->next = NULL;
 +			rtl8169_start_xmit(nskb, tp->dev);
 +		} while (segs);
 +
 +		dev_kfree_skb(skb);
 +	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +		if (skb_checksum_help(skb) < 0)
 +			goto drop;
 +
 +		rtl8169_start_xmit(skb, tp->dev);
 +	} else {
 +		struct net_device_stats *stats;
 +
 +drop:
 +		stats = &tp->dev->stats;
 +		stats->tx_dropped++;
 +		dev_kfree_skb(skb);
 +	}
 +}
 +
 +/* msdn_giant_send_check()
 + * According to the document of microsoft, the TCP Pseudo Header excludes the
 + * packet length for IPv6 TCP large packets.
 + */
 +static int msdn_giant_send_check(struct sk_buff *skb)
 +{
 +	const struct ipv6hdr *ipv6h;
 +	struct tcphdr *th;
 +	int ret;
 +
 +	ret = skb_cow_head(skb, 0);
 +	if (ret)
 +		return ret;
 +
 +	ipv6h = ipv6_hdr(skb);
 +	th = tcp_hdr(skb);
 +
 +	th->check = 0;
 +	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
 +
 +	return ret;
 +}
 +
 +static inline __be16 get_protocol(struct sk_buff *skb)
 +{
 +	__be16 protocol;
 +
 +	if (skb->protocol == htons(ETH_P_8021Q))
 +		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
 +	else
 +		protocol = skb->protocol;
 +
 +	return protocol;
 +}
 +
 +static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
 +				struct sk_buff *skb, u32 *opts)
  {
 -	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
  	u32 mss = skb_shinfo(skb)->gso_size;
  
  	if (mss) {
  		opts[0] |= TD_LSO;
 -		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
 +		opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
  	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
  		const struct iphdr *ip = ip_hdr(skb);
  
 +		if (ip->protocol == IPPROTO_TCP)
 +			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
 +		else if (ip->protocol == IPPROTO_UDP)
 +			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
 +		else
 +			WARN_ON_ONCE(1);
 +	}
 +
 +	return true;
 +}
 +
 +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 +				struct sk_buff *skb, u32 *opts)
 +{
 +	u32 transport_offset = (u32)skb_transport_offset(skb);
 +	u32 mss = skb_shinfo(skb)->gso_size;
 +
 +	if (mss) {
 +		if (transport_offset > GTTCPHO_MAX) {
 +			netif_warn(tp, tx_err, tp->dev,
 +				   "Invalid transport offset 0x%x for TSO\n",
 +				   transport_offset);
 +			return false;
 +		}
 +
 +		switch (get_protocol(skb)) {
 +		case htons(ETH_P_IP):
 +			opts[0] |= TD1_GTSENV4;
 +			break;
 +
 +		case htons(ETH_P_IPV6):
 +			if (msdn_giant_send_check(skb))
 +				return false;
 +
 +			opts[0] |= TD1_GTSENV6;
 +			break;
 +
 +		default:
 +			WARN_ON_ONCE(1);
 +			break;
 +		}
 +
 +		opts[0] |= transport_offset << GTTCPHO_SHIFT;
 +		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
 +	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 +		u8 ip_protocol;
 +
  		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
  			return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
  
 -		if (ip->protocol == IPPROTO_TCP)
 -			opts[offset] |= info->checksum.tcp;
 -		else if (ip->protocol == IPPROTO_UDP)
 -			opts[offset] |= info->checksum.udp;
 +		if (transport_offset > TCPHO_MAX) {
 +			netif_warn(tp, tx_err, tp->dev,
 +				   "Invalid transport offset 0x%x\n",
 +				   transport_offset);
 +			return false;
 +		}
 +
 +		switch (get_protocol(skb)) {
 +		case htons(ETH_P_IP):
 +			opts[1] |= TD1_IPv4_CS;
 +			ip_protocol = ip_hdr(skb)->protocol;
 +			break;
 +
 +		case htons(ETH_P_IPV6):
 +			opts[1] |= TD1_IPv6_CS;
 +			ip_protocol = ipv6_hdr(skb)->nexthdr;
 +			break;
 +
 +		default:
 +			ip_protocol = IPPROTO_RAW;
 +			break;
 +		}
 +
 +		if (ip_protocol == IPPROTO_TCP)
 +			opts[1] |= TD1_TCP_CS;
 +		else if (ip_protocol == IPPROTO_UDP)
 +			opts[1] |= TD1_UDP_CS;
  		else
  			WARN_ON_ONCE(1);
 +
 +		opts[1] |= transport_offset << TCPHO_SHIFT;
  	} else {
  		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
  			return rtl_skb_pad(skb);
  	}
 +
  	return true;
  }
  
@@@ -6152,10 -6021,8 +6154,10 @@@ static netdev_tx_t rtl8169_start_xmit(s
  	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
  	opts[0] = DescOwn;
  
 -	if (!rtl8169_tso_csum(tp, skb, opts))
 -		goto err_update_stats;
 +	if (!tp->tso_csum(tp, skb, opts)) {
 +		r8169_csum_workaround(tp, skb);
 +		return NETDEV_TX_OK;
 +	}
  
  	len = skb_headlen(skb);
  	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
@@@ -6220,6 -6087,7 +6222,6 @@@ err_dma_1
  	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
  err_dma_0:
  	dev_kfree_skb_any(skb);
 -err_update_stats:
  	dev->stats.tx_dropped++;
  	return NETDEV_TX_OK;
  
@@@ -7304,14 -7172,6 +7306,14 @@@ rtl_init_one(struct pci_dev *pdev, cons
  		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
  		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
  
 +	if (tp->txd_version == RTL_TD_0)
 +		tp->tso_csum = rtl8169_tso_csum_v1;
 +	else if (tp->txd_version == RTL_TD_1) {
 +		tp->tso_csum = rtl8169_tso_csum_v2;
 +		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 +	} else
 +		WARN_ON_ONCE(1);
 +
  	dev->hw_features |= NETIF_F_RXALL;
  	dev->hw_features |= NETIF_F_RXFCS;
  
diff --combined drivers/net/ppp/ppp_generic.c
index 2031ce4,d5b77ef..765248b
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@@ -655,10 -655,6 +655,10 @@@ static long ppp_ioctl(struct file *file
  			break;
  		ppp_lock(ppp);
  		cflags = ppp->flags & ~val;
 +#ifdef CONFIG_PPP_MULTILINK
 +		if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
 +			ppp->nextseq = 0;
 +#endif
  		ppp->flags = val & SC_FLAG_BITS;
  		ppp_unlock(ppp);
  		if (cflags & SC_CCP_OPEN)
@@@ -761,10 -757,15 +761,15 @@@
  			};
  
  			ppp_lock(ppp);
- 			if (ppp->pass_filter)
+ 			if (ppp->pass_filter) {
  				sk_unattached_filter_destroy(ppp->pass_filter);
- 			err = sk_unattached_filter_create(&ppp->pass_filter,
- 							  &fprog);
+ 				ppp->pass_filter = NULL;
+ 			}
+ 			if (fprog.filter != NULL)
+ 				err = sk_unattached_filter_create(&ppp->pass_filter,
+ 								  &fprog);
+ 			else
+ 				err = 0;
  			kfree(code);
  			ppp_unlock(ppp);
  		}
@@@ -782,10 -783,15 +787,15 @@@
  			};
  
  			ppp_lock(ppp);
- 			if (ppp->active_filter)
+ 			if (ppp->active_filter) {
  				sk_unattached_filter_destroy(ppp->active_filter);
- 			err = sk_unattached_filter_create(&ppp->active_filter,
- 							  &fprog);
+ 				ppp->active_filter = NULL;
+ 			}
+ 			if (fprog.filter != NULL)
+ 				err = sk_unattached_filter_create(&ppp->active_filter,
+ 								  &fprog);
+ 			else
+ 				err = 0;
  			kfree(code);
  			ppp_unlock(ppp);
  		}
@@@ -2663,8 -2669,7 +2673,8 @@@ ppp_create_interface(struct net *net, i
  	int ret = -ENOMEM;
  	int i;
  
 -	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
 +	dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_UNKNOWN,
 +			   ppp_setup);
  	if (!dev)
  		goto out1;
  
diff --combined drivers/net/wan/x25_asy.c
index df6c073,fa9fdfa..5c47b01
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@@ -81,8 -81,8 +81,8 @@@ static struct x25_asy *x25_asy_alloc(vo
  		char name[IFNAMSIZ];
  		sprintf(name, "x25asy%d", i);
  
 -		dev = alloc_netdev(sizeof(struct x25_asy),
 -				   name, x25_asy_setup);
 +		dev = alloc_netdev(sizeof(struct x25_asy), name,
 +				   NET_NAME_UNKNOWN, x25_asy_setup);
  		if (!dev)
  			return NULL;
  
@@@ -122,8 -122,12 +122,12 @@@ static int x25_asy_change_mtu(struct ne
  {
  	struct x25_asy *sl = netdev_priv(dev);
  	unsigned char *xbuff, *rbuff;
- 	int len = 2 * newmtu;
+ 	int len;
  
+ 	if (newmtu > 65534)
+ 		return -EINVAL;
+ 
+ 	len = 2 * newmtu;
  	xbuff = kmalloc(len + 4, GFP_ATOMIC);
  	rbuff = kmalloc(len + 4, GFP_ATOMIC);
  
diff --combined drivers/net/xen-netback/netback.c
index 77127ca,c65b636..769e553
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@@ -1030,14 -1030,21 +1030,21 @@@ static int xenvif_tx_check_gop(struct x
  {
  	struct gnttab_map_grant_ref *gop_map = *gopp_map;
  	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ 	/* This always points to the shinfo of the skb being checked, which
+ 	 * could be either the first or the one on the frag_list
+ 	 */
  	struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 	/* If this is non-NULL, we are currently checking the frag_list skb, and
+ 	 * this points to the shinfo of the first one
+ 	 */
+ 	struct skb_shared_info *first_shinfo = NULL;
  	int nr_frags = shinfo->nr_frags;
+ 	const bool sharedslot = nr_frags &&
+ 				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
  	int i, err;
  
  	/* Check status of header. */
  	err = (*gopp_copy)->status;
- 	(*gopp_copy)++;
  	if (unlikely(err)) {
  		if (net_ratelimit())
  			netdev_dbg(queue->vif->dev,
@@@ -1045,8 -1052,12 +1052,12 @@@
  				   (*gopp_copy)->status,
  				   pending_idx,
  				   (*gopp_copy)->source.u.ref);
- 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+ 		/* The first frag might still have this slot mapped */
+ 		if (!sharedslot)
+ 			xenvif_idx_release(queue, pending_idx,
+ 					   XEN_NETIF_RSP_ERROR);
  	}
+ 	(*gopp_copy)++;
  
  check_frags:
  	for (i = 0; i < nr_frags; i++, gop_map++) {
@@@ -1062,8 -1073,19 +1073,19 @@@
  						pending_idx,
  						gop_map->handle);
  			/* Had a previous error? Invalidate this fragment. */
- 			if (unlikely(err))
+ 			if (unlikely(err)) {
  				xenvif_idx_unmap(queue, pending_idx);
+ 				/* If the mapping of the first frag was OK, but
+ 				 * the header's copy failed, and they are
+ 				 * sharing a slot, send an error
+ 				 */
+ 				if (i == 0 && sharedslot)
+ 					xenvif_idx_release(queue, pending_idx,
+ 							   XEN_NETIF_RSP_ERROR);
+ 				else
+ 					xenvif_idx_release(queue, pending_idx,
+ 							   XEN_NETIF_RSP_OKAY);
+ 			}
  			continue;
  		}
  
@@@ -1075,42 -1097,53 +1097,53 @@@
  				   gop_map->status,
  				   pending_idx,
  				   gop_map->ref);
+ 
  		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
  
  		/* Not the first error? Preceding frags already invalidated. */
  		if (err)
  			continue;
- 		/* First error: invalidate preceding fragments. */
+ 
+ 		/* First error: if the header haven't shared a slot with the
+ 		 * first frag, release it as well.
+ 		 */
+ 		if (!sharedslot)
+ 			xenvif_idx_release(queue,
+ 					   XENVIF_TX_CB(skb)->pending_idx,
+ 					   XEN_NETIF_RSP_OKAY);
+ 
+ 		/* Invalidate preceding fragments of this skb. */
  		for (j = 0; j < i; j++) {
  			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
  			xenvif_idx_unmap(queue, pending_idx);
+ 			xenvif_idx_release(queue, pending_idx,
+ 					   XEN_NETIF_RSP_OKAY);
+ 		}
+ 
+ 		/* And if we found the error while checking the frag_list, unmap
+ 		 * the first skb's frags
+ 		 */
+ 		if (first_shinfo) {
+ 			for (j = 0; j < first_shinfo->nr_frags; j++) {
+ 				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+ 				xenvif_idx_unmap(queue, pending_idx);
+ 				xenvif_idx_release(queue, pending_idx,
+ 						   XEN_NETIF_RSP_OKAY);
+ 			}
  		}
  
  		/* Remember the error: invalidate all subsequent fragments. */
  		err = newerr;
  	}
  
- 	if (skb_has_frag_list(skb)) {
- 		first_skb = skb;
- 		skb = shinfo->frag_list;
- 		shinfo = skb_shinfo(skb);
+ 	if (skb_has_frag_list(skb) && !first_shinfo) {
+ 		first_shinfo = skb_shinfo(skb);
+ 		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
  		nr_frags = shinfo->nr_frags;
  
  		goto check_frags;
  	}
  
- 	/* There was a mapping error in the frag_list skb. We have to unmap
- 	 * the first skb's frags
- 	 */
- 	if (first_skb && err) {
- 		int j;
- 		shinfo = skb_shinfo(first_skb);
- 		for (j = 0; j < shinfo->nr_frags; j++) {
- 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- 			xenvif_idx_unmap(queue, pending_idx);
- 		}
- 	}
- 
  	*gopp_map = gop_map;
  	return err;
  }
@@@ -1518,7 -1551,16 +1551,16 @@@ static int xenvif_tx_submit(struct xenv
  
  		/* Check the remap error code. */
  		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+ 			/* If there was an error, xenvif_tx_check_gop is
+ 			 * expected to release all the frags which were mapped,
+ 			 * so kfree_skb shouldn't do it again
+ 			 */
  			skb_shinfo(skb)->nr_frags = 0;
+ 			if (skb_has_frag_list(skb)) {
+ 				struct sk_buff *nskb =
+ 						skb_shinfo(skb)->frag_list;
+ 				skb_shinfo(nskb)->nr_frags = 0;
+ 			}
  			kfree_skb(skb);
  			continue;
  		}
@@@ -1822,8 -1864,6 +1864,6 @@@ void xenvif_idx_unmap(struct xenvif_que
  			   tx_unmap_op.status);
  		BUG();
  	}
- 
- 	xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
  }
  
  static inline int rx_work_todo(struct xenvif_queue *queue)
@@@ -1987,13 -2027,6 +2027,13 @@@ static int __init netback_init(void
  
  	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
  
 +#ifdef CONFIG_DEBUG_FS
 +	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
 +	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
 +		pr_warn("Init of debugfs returned %ld!\n",
 +			PTR_ERR(xen_netback_dbg_root));
 +#endif /* CONFIG_DEBUG_FS */
 +
  	return 0;
  
  failed_init:
@@@ -2004,10 -2037,6 +2044,10 @@@ module_init(netback_init)
  
  static void __exit netback_fini(void)
  {
 +#ifdef CONFIG_DEBUG_FS
 +	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
 +		debugfs_remove_recursive(xen_netback_dbg_root);
 +#endif /* CONFIG_DEBUG_FS */
  	xenvif_xenbus_fini();
  }
  module_exit(netback_fini);
diff --combined net/batman-adv/soft-interface.c
index d551e63,cbd677f..e0a7239
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@@ -448,10 -448,15 +448,15 @@@ out
   *  possibly free it
   * @softif_vlan: the vlan object to release
   */
- void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
  {
- 	if (atomic_dec_and_test(&softif_vlan->refcount))
- 		kfree_rcu(softif_vlan, rcu);
+ 	if (atomic_dec_and_test(&vlan->refcount)) {
+ 		spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+ 		hlist_del_rcu(&vlan->list);
+ 		spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+ 
+ 		kfree_rcu(vlan, rcu);
+ 	}
  }
  
  /**
@@@ -505,6 -510,7 +510,7 @@@ int batadv_softif_create_vlan(struct ba
  	if (!vlan)
  		return -ENOMEM;
  
+ 	vlan->bat_priv = bat_priv;
  	vlan->vid = vid;
  	atomic_set(&vlan->refcount, 1);
  
@@@ -516,6 -522,10 +522,10 @@@
  		return err;
  	}
  
+ 	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ 	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ 	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+ 
  	/* add a new TT local entry. This one will be marked with the NOPURGE
  	 * flag
  	 */
@@@ -523,10 -533,6 +533,6 @@@
  			    bat_priv->soft_iface->dev_addr, vid,
  			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
  
- 	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- 	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
- 	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
- 
  	return 0;
  }
  
@@@ -538,18 -544,13 +544,13 @@@
  static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
  				       struct batadv_softif_vlan *vlan)
  {
- 	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- 	hlist_del_rcu(&vlan->list);
- 	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
- 
- 	batadv_sysfs_del_vlan(bat_priv, vlan);
- 
  	/* explicitly remove the associated TT local entry because it is marked
  	 * with the NOPURGE flag
  	 */
  	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
  			       vlan->vid, "vlan interface destroyed", false);
  
+ 	batadv_sysfs_del_vlan(bat_priv, vlan);
  	batadv_softif_vlan_free_ref(vlan);
  }
  
@@@ -567,6 -568,8 +568,8 @@@ static int batadv_interface_add_vid(str
  				    unsigned short vid)
  {
  	struct batadv_priv *bat_priv = netdev_priv(dev);
+ 	struct batadv_softif_vlan *vlan;
+ 	int ret;
  
  	/* only 802.1Q vlans are supported.
  	 * batman-adv does not know how to handle other types
@@@ -576,7 -579,36 +579,36 @@@
  
  	vid |= BATADV_VLAN_HAS_TAG;
  
- 	return batadv_softif_create_vlan(bat_priv, vid);
+ 	/* if a new vlan is getting created and it already exists, it means that
+ 	 * it was not deleted yet. batadv_softif_vlan_get() increases the
+ 	 * refcount in order to revive the object.
+ 	 *
+ 	 * if it does not exist then create it.
+ 	 */
+ 	vlan = batadv_softif_vlan_get(bat_priv, vid);
+ 	if (!vlan)
+ 		return batadv_softif_create_vlan(bat_priv, vid);
+ 
+ 	/* recreate the sysfs object if it was already destroyed (and it should
+ 	 * be since we received a kill_vid() for this vlan
+ 	 */
+ 	if (!vlan->kobj) {
+ 		ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+ 		if (ret) {
+ 			batadv_softif_vlan_free_ref(vlan);
+ 			return ret;
+ 		}
+ 	}
+ 
+ 	/* add a new TT local entry. This one will be marked with the NOPURGE
+ 	 * flag. This must be added again, even if the vlan object already
+ 	 * exists, because the entry was deleted by kill_vid()
+ 	 */
+ 	batadv_tt_local_add(bat_priv->soft_iface,
+ 			    bat_priv->soft_iface->dev_addr, vid,
+ 			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+ 
+ 	return 0;
  }
  
  /**
@@@ -895,7 -927,7 +927,7 @@@ struct net_device *batadv_softif_create
  	int ret;
  
  	soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
 -				  batadv_softif_init_early);
 +				  NET_NAME_UNKNOWN, batadv_softif_init_early);
  	if (!soft_iface)
  		return NULL;
  
diff --combined net/core/dev.c
index e52a378,367a586..e1b7cfa
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -1085,7 -1085,6 +1085,7 @@@ static int dev_get_valid_name(struct ne
   */
  int dev_change_name(struct net_device *dev, const char *newname)
  {
 +	unsigned char old_assign_type;
  	char oldname[IFNAMSIZ];
  	int err = 0;
  	int ret;
@@@ -1113,17 -1112,10 +1113,17 @@@
  		return err;
  	}
  
 +	if (oldname[0] && !strchr(oldname, '%'))
 +		netdev_info(dev, "renamed from %s\n", oldname);
 +
 +	old_assign_type = dev->name_assign_type;
 +	dev->name_assign_type = NET_NAME_RENAMED;
 +
  rollback:
  	ret = device_rename(&dev->dev, dev->name);
  	if (ret) {
  		memcpy(dev->name, oldname, IFNAMSIZ);
 +		dev->name_assign_type = old_assign_type;
  		write_seqcount_end(&devnet_rename_seq);
  		return ret;
  	}
@@@ -1152,8 -1144,6 +1152,8 @@@
  			write_seqcount_begin(&devnet_rename_seq);
  			memcpy(dev->name, oldname, IFNAMSIZ);
  			memcpy(oldname, newname, IFNAMSIZ);
 +			dev->name_assign_type = old_assign_type;
 +			old_assign_type = NET_NAME_RENAMED;
  			goto rollback;
  		} else {
  			pr_err("%s: name change rollback failed: %d\n",
@@@ -2424,8 -2414,8 +2424,8 @@@ struct sk_buff *__skb_gso_segment(struc
  
  		skb_warn_bad_offload(skb);
  
 -		if (skb_header_cloned(skb) &&
 -		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 +		err = skb_cow_head(skb, 0);
 +		if (err < 0)
  			return ERR_PTR(err);
  	}
  
@@@ -2755,8 -2745,8 +2755,8 @@@ static inline int __dev_xmit_skb(struc
  	/*
  	 * Heuristic to force contended enqueues to serialize on a
  	 * separate lock before trying to get qdisc main lock.
 -	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
 -	 * and dequeue packets faster.
 +	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
 +	 * often and dequeue packets faster.
  	 */
  	contended = qdisc_is_running(q);
  	if (unlikely(contended))
@@@ -4106,6 -4096,8 +4106,8 @@@ static void napi_reuse_skb(struct napi_
  	skb->vlan_tci = 0;
  	skb->dev = napi->dev;
  	skb->skb_iif = 0;
+ 	skb->encapsulation = 0;
+ 	skb_shinfo(skb)->gso_type = 0;
  	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
  
  	napi->skb = skb;
@@@ -5448,9 -5440,13 +5450,9 @@@ int __dev_change_flags(struct net_devic
  	 */
  
  	ret = 0;
 -	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
 +	if ((old_flags ^ flags) & IFF_UP)
  		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
  
 -		if (!ret)
 -			dev_set_rx_mode(dev);
 -	}
 -
  	if ((flags ^ dev->gflags) & IFF_PROMISC) {
  		int inc = (flags & IFF_PROMISC) ? 1 : -1;
  		unsigned int old_flags = dev->flags;
@@@ -6450,19 -6446,17 +6452,19 @@@ void netdev_freemem(struct net_device *
  
  /**
   *	alloc_netdev_mqs - allocate network device
 - *	@sizeof_priv:	size of private data to allocate space for
 - *	@name:		device name format string
 - *	@setup:		callback to initialize device
 - *	@txqs:		the number of TX subqueues to allocate
 - *	@rxqs:		the number of RX subqueues to allocate
 + *	@sizeof_priv:		size of private data to allocate space for
 + *	@name:			device name format string
 + *	@name_assign_type: 	origin of device name
 + *	@setup:			callback to initialize device
 + *	@txqs:			the number of TX subqueues to allocate
 + *	@rxqs:			the number of RX subqueues to allocate
   *
   *	Allocates a struct net_device with private data area for driver use
   *	and performs basic initialization.  Also allocates subqueue structs
   *	for each queue on the device.
   */
  struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 +		unsigned char name_assign_type,
  		void (*setup)(struct net_device *),
  		unsigned int txqs, unsigned int rxqs)
  {
@@@ -6541,7 -6535,6 +6543,7 @@@
  #endif
  
  	strcpy(dev->name, name);
 +	dev->name_assign_type = name_assign_type;
  	dev->group = INIT_NETDEV_GROUP;
  	if (!dev->ethtool_ops)
  		dev->ethtool_ops = &default_ethtool_ops;
@@@ -6953,14 -6946,12 +6955,14 @@@ static int __netdev_printk(const char *
  	if (dev && dev->dev.parent) {
  		r = dev_printk_emit(level[1] - '0',
  				    dev->dev.parent,
 -				    "%s %s %s: %pV",
 +				    "%s %s %s%s: %pV",
  				    dev_driver_string(dev->dev.parent),
  				    dev_name(dev->dev.parent),
 -				    netdev_name(dev), vaf);
 +				    netdev_name(dev), netdev_reg_state(dev),
 +				    vaf);
  	} else if (dev) {
 -		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
 +		r = printk("%s%s%s: %pV", level, netdev_name(dev),
 +			   netdev_reg_state(dev), vaf);
  	} else {
  		r = printk("%s(NULL net_device): %pV", level, vaf);
  	}
@@@ -7112,7 -7103,7 +7114,7 @@@ static void __net_exit default_device_e
  	rtnl_lock_unregistering(net_list);
  	list_for_each_entry(net, net_list, exit_list) {
  		for_each_netdev_reverse(net, dev) {
 -			if (dev->rtnl_link_ops)
 +			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
  				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
  			else
  				unregister_netdevice_queue(dev, &dev_kill_list);

-- 
LinuxNextTracking

