[linux-next] LinuxNextTracking branch, master, updated. next-20211029
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 7df621a3eea6761bc83e641aaca6963210c7290d
Merge: f2edaa4ad5d51371709196f2c258fbe875962dee 411a44c24a561e449b592ff631b7ae321f1eb559
Author: Jakub Kicinski <kuba(a)kernel.org>
Date: Thu Oct 28 10:43:58 2021 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
include/net/sock.h
7b50ecfcc6cd ("net: Rename ->stream_memory_read to ->sock_is_readable")
4c1e34c0dbff ("vsock: Enable y2038 safe timeval for timeout")
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
0daa55d033b0 ("octeontx2-af: cn10k: debugfs for dumping LMTST map table")
e77bcdd1f639 ("octeontx2-af: Display all enabled PF VF rsrc_alloc entries.")
Adjacent code addition in both cases, keep both.
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
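For context on the resolution note above: an "adjacent code addition" conflict
arises when both parent branches add different lines at the same location, so
git cannot merge them automatically; the resolver keeps both hunks, one after
the other. Below is a minimal hypothetical sketch of such a resolved site;
the type and member names are illustrative stand-ins, not the actual
include/net/sock.h contents.

#include <stdbool.h>

struct sock_example;	/* hypothetical stand-in for struct sock */

struct proto_example {
	/* pre-existing member, present in both parents */
	int (*read)(struct sock_example *sk);
	/* added by one parent branch */
	bool (*is_readable)(struct sock_example *sk);
	/* added by the other parent branch; it simply ends up right below
	 * the first addition once the conflict markers are removed
	 */
	long rcv_timeout_sec;
};

Since the two additions do not interact, keeping both preserves each parent's
intent; the same reasoning applies to the rvu_debugfs.c conflict listed above.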
diff --combined MAINTAINERS
index 975086c5345d,c34486524d40..3b85f039fbf9
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -2899,12 -2899,6 +2899,12 @@@ S: Maintaine
F: Documentation/hwmon/asc7621.rst
F: drivers/hwmon/asc7621.c
+ASIX AX88796C SPI ETHERNET ADAPTER
+M: Łukasz Stelmach <l.stelmach(a)samsung.com>
+S: Maintained
+F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml
+F: drivers/net/ethernet/asix/ax88796c_*
+
ASPEED PINCTRL DRIVERS
M: Andrew Jeffery <andrew(a)aj.id.au>
L: linux-aspeed(a)lists.ozlabs.org (moderated for non-subscribers)
@@@ -5464,6 -5458,19 +5464,19 @@@ F: include/net/devlink.
F: include/uapi/linux/devlink.h
F: net/core/devlink.c
+ DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+ M: Christoph Niedermaier <cniedermaier(a)dh-electronics.com>
+ L: kernel(a)dh-electronics.com
+ S: Maintained
+ F: arch/arm/boot/dts/imx6*-dhcom-*
+
+ DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
+ M: Marek Vasut <marex(a)denx.de>
+ L: kernel(a)dh-electronics.com
+ S: Maintained
+ F: arch/arm/boot/dts/stm32mp1*-dhcom-*
+ F: arch/arm/boot/dts/stm32mp1*-dhcor-*
+
DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource(a)diasemi.com>
S: Supported
@@@ -7018,6 -7025,7 +7031,6 @@@ F: drivers/net/mdio/fwnode_mdio.
F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
-F: drivers/of/of_net.c
F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
F: include/linux/mdio/*.h
@@@ -7029,7 -7037,6 +7042,7 @@@ F: include/linux/platform_data/mdio-gpi
F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
+F: net/core/of_net.c
EXFAT FILE SYSTEM
M: Namjae Jeon <linkinjeon(a)kernel.org>
@@@ -11284,7 -11291,6 +11297,6 @@@ F: Documentation/networking/device_driv
F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER
- M: Vadym Kochan <vkochan(a)marvell.com>
M: Taras Chornyi <tchornyi(a)marvell.com>
S: Supported
W: https://github.com/Marvell-switching/switchdev-prestera
@@@ -11830,9 -11836,7 +11842,9 @@@ F: drivers/mmc/host/mtk-sd.
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd(a)nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83(a)gmail.com>
-R: Ryder Lee <ryder.lee(a)mediatek.com>
+M: Ryder Lee <ryder.lee(a)mediatek.com>
+R: Shayne Chen <shayne.chen(a)mediatek.com>
+R: Sean Wang <sean.wang(a)mediatek.com>
L: linux-wireless(a)vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
@@@ -13056,7 -13060,6 +13068,7 @@@ F: include/linux/dsa
F: include/linux/platform_data/dsa.h
F: include/net/dsa.h
F: net/dsa/
+F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem(a)davemloft.net>
@@@ -15472,7 -15475,6 +15484,7 @@@ M: ath9k-devel(a)qca.qualcomm.co
L: linux-wireless(a)vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
+F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER
@@@ -15894,12 -15896,6 +15906,12 @@@ L: linux-wireless(a)vger.kernel.or
S: Maintained
F: drivers/net/wireless/realtek/rtw88/
+REALTEK WIRELESS DRIVER (rtw89)
+M: Ping-Ke Shih <pkshih(a)realtek.com>
+L: linux-wireless(a)vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/realtek/rtw89/
+
REDPINE WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar(a)gmail.com>
M: Siva Rebbagondla <siva8118(a)gmail.com>
@@@ -20352,6 -20348,7 +20364,7 @@@ X86 ARCHITECTURE (32-BIT AND 64-BIT
M: Thomas Gleixner <tglx(a)linutronix.de>
M: Ingo Molnar <mingo(a)redhat.com>
M: Borislav Petkov <bp(a)alien8.de>
+ M: Dave Hansen <dave.hansen(a)linux.intel.com>
M: x86(a)kernel.org
R: "H. Peter Anvin" <hpa(a)zytor.com>
L: linux-kernel(a)vger.kernel.org
diff --combined drivers/infiniband/hw/mlx5/mr.c
index 14c5564428ab,22e2f4d79743..d2044df30394
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@@ -88,8 -88,9 +88,8 @@@ static void set_mkc_access_pd_addr_fiel
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void
-assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
@@@ -99,22 -100,17 +99,22 @@@
mkey->key = key;
}
-static int
-mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
+static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
+ int ret;
+
assign_mkey_variant(dev, mkey, in);
- return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
+ ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
+ if (!ret)
+ init_waitqueue_head(&mkey->wait);
+
+ return ret;
}
static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mkey,
+ struct mlx5_ib_mkey *mkey,
struct mlx5_async_ctx *async_ctx,
u32 *in, int inlen, u32 *out, int outlen,
struct mlx5_async_work *context)
@@@ -137,7 -133,7 +137,7 @@@ static int destroy_mkey(struct mlx5_ib_
{
WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
@@@ -264,11 -260,10 +264,11 @@@ static struct mlx5_ib_mr *create_cache_
goto free_in;
}
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
if (err)
goto free_mr;
+ init_waitqueue_head(&mr->mmkey.wait);
mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
@@@ -295,7 -290,7 +295,7 @@@ static void remove_cache_mr_locked(stru
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
kfree(mr);
spin_lock_irq(&ent->lock);
}
@@@ -663,7 -658,7 +663,7 @@@ static void clean_keys(struct mlx5_ib_d
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@@ -916,13 -911,12 +916,13 @@@ static struct mlx5_cache_ent *mr_cache_
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 length, int access_flags)
+ u64 length, int access_flags, u64 iova)
{
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->ibmr.length = length;
mr->ibmr.device = &dev->ib_dev;
+ mr->ibmr.iova = iova;
mr->access_flags = access_flags;
}
@@@ -980,8 -974,11 +980,8 @@@ static struct mlx5_ib_mr *alloc_cacheab
mr->ibmr.pd = pd;
mr->umem = umem;
- mr->mmkey.iova = iova;
- mr->mmkey.size = umem->length;
- mr->mmkey.pd = to_mpd(pd)->pdn;
mr->page_shift = order_base_2(page_size);
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
}
@@@ -1090,8 -1087,8 +1090,8 @@@ static void *mlx5_ib_create_xlt_wr(stru
wr->wr.opcode = MLX5_IB_WR_UMR;
wr->pd = mr->ibmr.pd;
wr->mkey = mr->mmkey.key;
- wr->length = mr->mmkey.size;
- wr->virt_addr = mr->mmkey.iova;
+ wr->length = mr->ibmr.length;
+ wr->virt_addr = mr->ibmr.iova;
wr->access_flags = mr->access_flags;
wr->page_shift = mr->page_shift;
wr->xlt_size = sg->length;
@@@ -1342,9 -1339,8 +1342,8 @@@ static struct mlx5_ib_mr *reg_create(st
goto err_2;
}
mr->mmkey.type = MLX5_MKEY_MR;
- mr->desc_size = sizeof(struct mlx5_mtt);
mr->umem = umem;
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@@ -1391,7 -1387,7 +1390,7 @@@ static struct ib_mr *mlx5_ib_get_dm_mr(
kfree(in);
- set_mr_fields(dev, mr, length, acc);
+ set_mr_fields(dev, mr, length, acc, start_addr);
return &mr->ibmr;
@@@ -1536,6 -1532,7 +1535,7 @@@ static struct ib_mr *create_user_odp_mr
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
}
+ xa_init(&mr->implicit_children);
odp->private = mr;
err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
@@@ -1712,6 -1709,7 +1712,6 @@@ static int umr_rereg_pd_access(struct m
return err;
mr->access_flags = access_flags;
- mr->mmkey.pd = to_mpd(pd)->pdn;
return 0;
}
@@@ -1756,6 -1754,7 +1756,6 @@@ static int umr_rereg_pas(struct mlx5_ib
if (flags & IB_MR_REREG_PD) {
mr->ibmr.pd = pd;
- mr->mmkey.pd = to_mpd(pd)->pdn;
upd_flags |= MLX5_IB_UPD_XLT_PD;
}
if (flags & IB_MR_REREG_ACCESS) {
@@@ -1764,8 -1763,8 +1764,8 @@@
}
mr->ibmr.length = new_umem->length;
- mr->mmkey.iova = iova;
- mr->mmkey.size = new_umem->length;
+ mr->ibmr.iova = iova;
+ mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
mr->umem = new_umem;
err = mlx5_ib_update_mr_pas(mr, upd_flags);
@@@ -1835,7 -1834,7 +1835,7 @@@ struct ib_mr *mlx5_ib_rereg_user_mr(str
mr->umem = NULL;
atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- return create_real_mr(new_pd, umem, mr->mmkey.iova,
+ return create_real_mr(new_pd, umem, mr->ibmr.iova,
new_access_flags);
}
@@@ -2264,9 -2263,9 +2264,9 @@@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw
struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mw *mw = to_mmw(ibmw);
+ unsigned int ndescs;
u32 *in = NULL;
void *mkc;
- int ndescs;
int err;
struct mlx5_ib_alloc_mw req = {};
struct {
@@@ -2311,7 -2310,7 +2311,7 @@@
mw->mmkey.type = MLX5_MKEY_MW;
ibmw->rkey = mw->mmkey.key;
- mw->ndescs = ndescs;
+ mw->mmkey.ndescs = ndescs;
resp.response_length =
min(offsetofend(typeof(resp), response_length), udata->outlen);
@@@ -2331,7 -2330,7 +2331,7 @@@
return 0;
free_mkey:
- mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
kfree(in);
return err;
@@@ -2350,7 -2349,7 +2350,7 @@@ int mlx5_ib_dealloc_mw(struct ib_mw *mw
*/
mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
- return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
@@@ -2407,7 -2406,7 +2407,7 @@@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *i
mr->meta_length = 0;
if (data_sg_nents == 1) {
n++;
- mr->ndescs = 1;
+ mr->mmkey.ndescs = 1;
if (data_sg_offset)
sg_offset = *data_sg_offset;
mr->data_length = sg_dma_len(data_sg) - sg_offset;
@@@ -2460,7 -2459,7 +2460,7 @@@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *m
if (sg_offset_p)
*sg_offset_p = sg_offset;
- mr->ndescs = i;
+ mr->mmkey.ndescs = i;
mr->data_length = mr->ibmr.length;
if (meta_sg_nents) {
@@@ -2493,11 -2492,11 +2493,11 @@@ static int mlx5_set_page(struct ib_mr *
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+ descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
}
@@@ -2507,11 -2506,11 +2507,11 @@@ static int mlx5_set_page_pi(struct ib_m
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs + mr->meta_ndescs++] =
+ descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
@@@ -2527,7 -2526,7 +2527,7 @@@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *
struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@@ -2561,7 -2560,7 +2561,7 @@@
* metadata offset at the first metadata page
*/
pi_mr->pi_iova = (iova & page_mask) +
- pi_mr->ndescs * ibmr->page_size +
+ pi_mr->mmkey.ndescs * ibmr->page_size +
(pi_mr->ibmr.iova & ~page_mask);
/*
* In order to use one MTT MR for data and metadata, we register
@@@ -2592,7 -2591,7 +2592,7 @@@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *
struct mlx5_ib_mr *pi_mr = mr->klm_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@@ -2627,7 -2626,7 +2627,7 @@@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *
WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
mr->data_length = 0;
mr->data_iova = 0;
mr->meta_ndescs = 0;
@@@ -2683,7 -2682,7 +2683,7 @@@ int mlx5_ib_map_mr_sg(struct ib_mr *ibm
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 1a1bebd453d3,e54f96251fea..67364ab63a1f
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@@ -137,7 -137,7 +137,7 @@@ static struct hns3_dbg_cmd_info hns3_db
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@@ -256,7 -256,7 +256,7 @@@
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@@ -298,7 -298,7 +298,7 @@@
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@@@ -336,20 -336,6 +336,20 @@@
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "coalesce_info",
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@@ -398,26 -384,6 +398,26 @@@
}
};
+static const struct hns3_dbg_item coal_info_items[] = {
+ { "VEC_ID", 2 },
+ { "ALGO_STATE", 2 },
+ { "PROFILE_ID", 2 },
+ { "CQE_MODE", 2 },
+ { "TUNE_STATE", 2 },
+ { "STEPS_LEFT", 2 },
+ { "STEPS_RIGHT", 2 },
+ { "TIRED", 2 },
+ { "SW_GL", 2 },
+ { "SW_QL", 2 },
+ { "HW_GL", 2 },
+ { "HW_QL", 2 },
+};
+
+static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
+static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
+static const char * const
+dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
+
static void hns3_dbg_fill_content(char *content, u16 len,
const struct hns3_dbg_item *items,
const char **result, u16 size)
@@@ -439,94 -405,6 +439,94 @@@
*pos++ = '\0';
}
+static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ char **result, int i, bool is_tx)
+{
+ unsigned int gl_offset, ql_offset;
+ struct hns3_enet_coalesce *coal;
+ unsigned int reg_val;
+ unsigned int j = 0;
+ struct dim *dim;
+ bool ql_enable;
+
+ if (is_tx) {
+ coal = &tqp_vector->tx_group.coal;
+ dim = &tqp_vector->tx_group.dim;
+ gl_offset = HNS3_VECTOR_GL1_OFFSET;
+ ql_offset = HNS3_VECTOR_TX_QL_OFFSET;
+ ql_enable = tqp_vector->tx_group.coal.ql_enable;
+ } else {
+ coal = &tqp_vector->rx_group.coal;
+ dim = &tqp_vector->rx_group.dim;
+ gl_offset = HNS3_VECTOR_GL0_OFFSET;
+ ql_offset = HNS3_VECTOR_RX_QL_OFFSET;
+ ql_enable = tqp_vector->rx_group.coal.ql_enable;
+ }
+
+ sprintf(result[j++], "%d", i);
+ sprintf(result[j++], "%s", dim_state_str[dim->state]);
+ sprintf(result[j++], "%u", dim->profile_ix);
+ sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+ sprintf(result[j++], "%s",
+ dim_tune_stat_str[dim->tune_state]);
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
+ sprintf(result[j++], "%u", coal->int_gl);
+ sprintf(result[j++], "%u", coal->int_ql);
+ reg_val = readl(tqp_vector->mask_addr + gl_offset) &
+ HNS3_VECTOR_GL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ if (ql_enable) {
+ reg_val = readl(tqp_vector->mask_addr + ql_offset) &
+ HNS3_VECTOR_QL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ } else {
+ sprintf(result[j++], "NA");
+ }
+}
+
+static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
+ int *pos, bool is_tx)
+{
+ char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(coal_info_items)];
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%s interrupt coalesce info:\n",
+ is_tx ? "tx" : "rx");
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ NULL, ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_get_coal_info(tqp_vector, result, i, is_tx);
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ (const char **)result,
+ ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
+static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+{
+ int pos = 0;
+
+ hns3_dump_coal_info(h, buf, len, &pos, true);
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ hns3_dump_coal_info(h, buf, len, &pos, false);
+
+ return 0;
+}
+
static const struct hns3_dbg_item tx_spare_info_items[] = {
{ "QUEUE_ID", 2 },
{ "COPYBREAK", 2 },
@@@ -584,7 -462,7 +584,7 @@@ static const struct hns3_dbg_item rx_qu
{ "TAIL", 2 },
{ "HEAD", 2 },
{ "FBDNUM", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
@@@ -687,7 -565,7 +687,7 @@@ static const struct hns3_dbg_item tx_qu
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "OFFSET", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "RING_EN", 2 },
{ "TX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@@@ -912,13 -790,13 +912,13 @@@ static int hns3_dbg_rx_bd_info(struct h
}
static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 5 },
- { "ADDRESS", 2 },
+ { "BD_IDX", 2 },
+ { "ADDRESS", 13 },
{ "VLAN_TAG", 2 },
{ "SIZE", 2 },
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
- { "TV", 2 },
+ { "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },
@@@ -1046,12 -924,6 +1046,12 @@@ hns3_dbg_dev_specs(struct hnae3_handle
dev_specs->max_tm_rate);
*pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
dev_specs->max_qset_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+ dev_specs->umv_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+ dev_specs->mc_mac_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
+ dev_specs->mac_stats_num);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@@ -1065,69 -937,6 +1065,69 @@@
return 0;
}
+static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+};
+
+static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+{
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+}
+
+static int
+hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
{
u32 i;
@@@ -1169,14 -978,6 +1169,14 @@@ static const struct hns3_dbg_func hns3_
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dbg_dump = hns3_dbg_tx_queue_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dbg_dump = hns3_dbg_coal_info,
+ },
};
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index f0aa4fbd2200,9cda8b3562b8..4e0a8c2f7c05
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@@ -391,7 -391,7 +391,7 @@@ static int hclge_dbg_dump_mac(struct hc
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u16 qset_id, qset_num;
int ret;
@@@ -408,12 -408,12 +408,12 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%04u %#x %#x %#x %#x\n",
- qset_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2, bitmap->bit3);
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
}
return 0;
@@@ -422,7 -422,7 +422,7 @@@
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pri_id, pri_num;
int ret;
@@@ -439,12 -439,11 +439,11 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pri_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@@ -453,7 -452,7 +452,7 @@@
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
@@@ -466,12 -465,11 +465,11 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pg_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@@ -511,7 -509,7 +509,7 @@@ static int hclge_dbg_dump_dcb_queue(str
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 port_id = 0;
int ret;
@@@ -521,12 -519,12 +519,12 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- bitmap->bit0);
+ req.bit0);
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- bitmap->bit1);
+ req.bit1);
return 0;
}
@@@ -1992,9 -1990,6 +1990,9 @@@ static int hclge_dbg_dump_umv_info(stru
}
mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
return 0;
}
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f1db6699f81f,d891390d492f..2e41aa2d1df8
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@@ -156,210 -156,174 +156,210 @@@ static const char hns3_nic_test_strs[][
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
- {"mac_tx_mac_pause_num",
+ {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
- {"mac_rx_mac_pause_num",
+ {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
- {"mac_tx_control_pkt_num",
+ {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
+ {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
+ {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
- {"mac_rx_control_pkt_num",
+ {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
- {"mac_tx_pfc_pkt_num",
+ {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
- {"mac_tx_pfc_pri0_pkt_num",
+ {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
- {"mac_tx_pfc_pri1_pkt_num",
+ {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
- {"mac_tx_pfc_pri2_pkt_num",
+ {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
- {"mac_tx_pfc_pri3_pkt_num",
+ {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
- {"mac_tx_pfc_pri4_pkt_num",
+ {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
- {"mac_tx_pfc_pri5_pkt_num",
+ {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
- {"mac_tx_pfc_pri6_pkt_num",
+ {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
- {"mac_tx_pfc_pri7_pkt_num",
+ {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
- {"mac_rx_pfc_pkt_num",
+ {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
+ {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
+ {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
+ {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
+ {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
+ {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
+ {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
+ {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
+ {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
- {"mac_rx_pfc_pri0_pkt_num",
+ {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
- {"mac_rx_pfc_pri1_pkt_num",
+ {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
- {"mac_rx_pfc_pri2_pkt_num",
+ {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
- {"mac_rx_pfc_pri3_pkt_num",
+ {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
- {"mac_rx_pfc_pri4_pkt_num",
+ {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
- {"mac_rx_pfc_pri5_pkt_num",
+ {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
- {"mac_rx_pfc_pri6_pkt_num",
+ {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
- {"mac_rx_pfc_pri7_pkt_num",
+ {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
- {"mac_tx_total_pkt_num",
+ {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
+ {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
+ {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
+ {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
+ {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
+ {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
+ {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
+ {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
+ {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
- {"mac_tx_total_oct_num",
+ {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
- {"mac_tx_good_pkt_num",
+ {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
- {"mac_tx_bad_pkt_num",
+ {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
- {"mac_tx_good_oct_num",
+ {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
- {"mac_tx_bad_oct_num",
+ {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
- {"mac_tx_uni_pkt_num",
+ {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
- {"mac_tx_multi_pkt_num",
+ {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
- {"mac_tx_broad_pkt_num",
+ {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
- {"mac_tx_undersize_pkt_num",
+ {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_oversize_pkt_num",
+ {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
- {"mac_tx_64_oct_pkt_num",
+ {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
- {"mac_tx_65_127_oct_pkt_num",
+ {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
- {"mac_tx_128_255_oct_pkt_num",
+ {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
- {"mac_tx_256_511_oct_pkt_num",
+ {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
- {"mac_tx_512_1023_oct_pkt_num",
+ {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
- {"mac_tx_1024_1518_oct_pkt_num",
+ {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_2047_oct_pkt_num",
+ {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
- {"mac_tx_2048_4095_oct_pkt_num",
+ {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
- {"mac_tx_4096_8191_oct_pkt_num",
+ {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_9216_oct_pkt_num",
+ {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
- {"mac_tx_9217_12287_oct_pkt_num",
+ {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
- {"mac_tx_12288_16383_oct_pkt_num",
+ {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
- {"mac_tx_1519_max_good_pkt_num",
+ {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
- {"mac_tx_1519_max_bad_pkt_num",
+ {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
- {"mac_rx_total_pkt_num",
+ {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
- {"mac_rx_total_oct_num",
+ {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
- {"mac_rx_good_pkt_num",
+ {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
- {"mac_rx_bad_pkt_num",
+ {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
- {"mac_rx_good_oct_num",
+ {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
- {"mac_rx_bad_oct_num",
+ {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
- {"mac_rx_uni_pkt_num",
+ {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
- {"mac_rx_multi_pkt_num",
+ {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
- {"mac_rx_broad_pkt_num",
+ {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
- {"mac_rx_undersize_pkt_num",
+ {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_oversize_pkt_num",
+ {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
- {"mac_rx_64_oct_pkt_num",
+ {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
- {"mac_rx_65_127_oct_pkt_num",
+ {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
- {"mac_rx_128_255_oct_pkt_num",
+ {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
- {"mac_rx_256_511_oct_pkt_num",
+ {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
- {"mac_rx_512_1023_oct_pkt_num",
+ {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
- {"mac_rx_1024_1518_oct_pkt_num",
+ {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_2047_oct_pkt_num",
+ {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
- {"mac_rx_2048_4095_oct_pkt_num",
+ {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
- {"mac_rx_4096_8191_oct_pkt_num",
+ {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_9216_oct_pkt_num",
+ {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
- {"mac_rx_9217_12287_oct_pkt_num",
+ {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
- {"mac_rx_12288_16383_oct_pkt_num",
+ {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
- {"mac_rx_1519_max_good_pkt_num",
+ {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
- {"mac_rx_1519_max_bad_pkt_num",
+ {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num",
+ {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
- {"mac_tx_undermin_pkt_num",
+ {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
- {"mac_tx_jabber_pkt_num",
+ {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
- {"mac_tx_err_all_pkt_num",
+ {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
- {"mac_tx_from_app_good_pkt_num",
+ {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
- {"mac_tx_from_app_bad_pkt_num",
+ {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
- {"mac_rx_fragment_pkt_num",
+ {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
- {"mac_rx_undermin_pkt_num",
+ {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
- {"mac_rx_jabber_pkt_num",
+ {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
- {"mac_rx_fcs_err_pkt_num",
+ {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
- {"mac_rx_send_app_good_pkt_num",
+ {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
- {"mac_rx_send_app_bad_pkt_num",
+ {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
@@@ -487,9 -451,8 +487,9 @@@ static int hclge_mac_update_stats_defec
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
- int i, k, n;
+ u32 data_size;
int ret;
+ u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
@@@ -500,37 -463,33 +500,37 @@@
return ret;
}
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
- /* for special opcode 0032, only the first desc has the head */
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ /* The first desc has a 64-bit header, so the data size is one less */
+ data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is continuous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
return 0;
}
-static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
+#define HCLGE_REG_NUM_PER_DESC 4
+
+ u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
- u16 i, k, n;
+ u32 data_size;
+ u32 desc_num;
int ret;
+ u32 i;
+
+ /* The first desc has a 64-bit header, so it must be accounted for */
+ desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
* so GFP_ATOMIC is more suitable here
@@@ -546,16 -505,21 +546,16 @@@
return ret;
}
- for (i = 0; i < desc_num; i++) {
- /* for special opcode 0034, only the first desc has the head */
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is continuous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
kfree(desc);
@@@ -563,37 -527,42 +563,37 @@@
return 0;
}
-static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
struct hclge_desc desc;
- __le32 *desc_data;
- u32 reg_num;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query mac statistic reg number, ret = %d\n",
+ ret);
return ret;
+ }
- desc_data = (__le32 *)(&desc.data[0]);
- reg_num = le32_to_cpu(*desc_data);
-
- *desc_num = 1 + ((reg_num - 3) >> 2) +
- (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+ *reg_num = le32_to_cpu(desc.data[0]);
+ if (*reg_num == 0) {
+ dev_err(&hdev->pdev->dev,
+ "mac statistic reg number is invalid!\n");
+ return -ENODATA;
+ }
return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
- u32 desc_num;
- int ret;
-
- ret = hclge_mac_query_reg_num(hdev, &desc_num);
/* The firmware supports the new statistics acquisition method */
- if (!ret)
- ret = hclge_mac_update_stats_complete(hdev, desc_num);
- else if (ret == -EOPNOTSUPP)
- ret = hclge_mac_update_stats_defective(hdev);
+ if (hdev->ae_dev->dev_specs.mac_stats_num)
+ return hclge_mac_update_stats_complete(hdev);
else
- dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
-
- return ret;
+ return hclge_mac_update_stats_defective(hdev);
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
@@@ -701,39 -670,20 +701,39 @@@ static u8 *hclge_tqps_get_strings(struc
return buff;
}
-static u64 *hclge_comm_get_stats(const void *comm_stats,
+static int hclge_comm_get_count(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ u32 size)
+{
+ int count = 0;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
+ count++;
+
+ return count;
+}
+
+static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
u64 *buf = data;
u32 i;
- for (i = 0; i < size; i++)
- buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
+ buf++;
+ }
- return buf + size;
+ return buf;
}
-static u8 *hclge_comm_get_strings(u32 stringset,
+static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 *data)
{
@@@ -744,9 -694,6 +744,9 @@@
return buff;
for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
@@@ -838,8 -785,7 +838,8 @@@ static int hclge_get_sset_count(struct
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
} else if (stringset == ETH_SS_STATS) {
- count = ARRAY_SIZE(g_mac_stats_string) +
+ count = hclge_comm_get_count(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string)) +
hclge_tqps_get_sset_count(handle, stringset);
}
@@@ -849,14 -795,12 +849,14 @@@
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
u8 *p = (char *)data;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
@@@ -890,7 -834,7 +890,7 @@@ static void hclge_get_stats(struct hnae
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@@ -1093,100 -1037,96 +1093,100 @@@ static int hclge_check_port_speed(struc
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_sr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_lr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_cr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_kr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@@ -1230,9 -1170,9 +1230,9 @@@ static void hclge_parse_fiber_link_mode
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
mac->supported);
- hclge_convert_setting_sr(mac, speed_ability);
- hclge_convert_setting_lr(mac, speed_ability);
- hclge_convert_setting_cr(mac, speed_ability);
+ hclge_convert_setting_sr(speed_ability, mac->supported);
+ hclge_convert_setting_lr(speed_ability, mac->supported);
+ hclge_convert_setting_cr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@@ -1248,7 -1188,7 +1248,7 @@@ static void hclge_parse_backplane_link_
{
struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability);
+ hclge_convert_setting_kr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@@ -1402,6 -1342,8 +1402,6 @@@ static void hclge_parse_cfg(struct hclg
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
- if (!cfg->umv_space)
- cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_PF_RSS_SIZE_M,
@@@ -1477,7 -1419,6 +1477,7 @@@ static void hclge_set_default_dev_specs
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@@ -1499,8 -1440,6 +1499,8 @@@
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@@ -1521,21 -1460,6 +1521,21 @@@
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
+{
+ u32 reg_num = 0;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &reg_num);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
+ return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@@ -1544,10 -1468,6 +1544,10 @@@
int ret;
int i;
+ ret = hclge_query_mac_stats_num(hdev);
+ if (ret)
+ return ret;
+
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
@@@ -1629,10 -1549,7 +1629,10 @@@ static int hclge_configure(struct hclge
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
- hdev->wanted_umv_size = cfg.umv_space;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
hdev->gro_en = true;
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
@@@ -2930,33 -2847,29 +2930,29 @@@ static void hclge_mbx_task_schedule(str
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task,
- delay_time);
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@@@ -3051,82 -2964,6 +3047,82 @@@ static void hclge_update_link_status(st
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
+static void hclge_update_speed_advertising(struct hclge_mac *mac)
+{
+ u32 speed_ability;
+
+ if (hclge_get_speed_bit(mac->speed, &speed_ability))
+ return;
+
+ switch (mac->module_type) {
+ case HNAE3_MODULE_TYPE_FIBRE_LR:
+ hclge_convert_setting_lr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_FIBRE_SR:
+ case HNAE3_MODULE_TYPE_AOC:
+ hclge_convert_setting_sr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_CR:
+ hclge_convert_setting_cr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_KR:
+ hclge_convert_setting_kr(speed_ability, mac->advertising);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_update_fec_advertising(struct hclge_mac *mac)
+{
+ if (mac->fec_mode & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->advertising);
+ else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->advertising);
+}
+
+static void hclge_update_pause_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ bool rx_en, tx_en;
+
+ switch (hdev->fc_mode_last_time) {
+ case HCLGE_FC_RX_PAUSE:
+ rx_en = true;
+ tx_en = false;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ rx_en = false;
+ tx_en = true;
+ break;
+ case HCLGE_FC_FULL:
+ rx_en = true;
+ tx_en = true;
+ break;
+ default:
+ rx_en = false;
+ tx_en = false;
+ break;
+ }
+
+ linkmode_set_pause(mac->advertising, tx_en, rx_en);
+}
+
+static void hclge_update_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ linkmode_zero(mac->advertising);
+ hclge_update_speed_advertising(mac);
+ hclge_update_fec_advertising(mac);
+ hclge_update_pause_advertising(hdev);
+}
+
static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
@@@ -3149,7 -2986,7 +3145,7 @@@
} else {
linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
mac->supported);
- linkmode_zero(mac->advertising);
+ hclge_update_advertising(hdev);
}
}
@@@ -3650,33 -3487,14 +3646,14 @@@ static void hclge_get_misc_vector(struc
hdev->num_msi_used += 1;
}
- static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
- {
- struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
- affinity_notify);
-
- cpumask_copy(&hdev->affinity_mask, mask);
- }
-
- static void hclge_irq_affinity_release(struct kref *ref)
- {
- }
-
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask);
-
- hdev->affinity_notify.notify = hclge_irq_affinity_notify;
- hdev->affinity_notify.release = hclge_irq_affinity_release;
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
- &hdev->affinity_notify);
}
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}
@@@ -8657,9 -8475,6 +8634,9 @@@ static int hclge_init_umv_space(struct
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
return 0;
}
@@@ -8677,8 -8492,6 +8654,8 @@@ static void hclge_reset_umv_space(struc
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
@@@ -8933,7 -8746,6 +8910,7 @@@ int hclge_add_mc_addr_common(struct hcl
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
+ bool is_new_addr = false;
int status;
/* mac addr check */
@@@ -8947,13 -8759,6 +8924,13 @@@
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
/* This mac addr does not exist, add new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
@@@ -8963,18 -8768,12 +8940,18 @@@
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- /* if already overflow, not to print each time */
- if (status == -ENOSPC &&
- !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
- dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ if (status == -ENOSPC)
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
return status;
+
+err_no_space:
+ /* if already overflowed, do not print each time */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@@ -9011,15 -8810,12 +8988,15 @@@ int hclge_rm_mc_addr_common(struct hclg
if (status)
return status;
- if (hclge_is_all_function_id_zero(desc))
+ if (hclge_is_all_function_id_zero(desc)) {
/* All the vfid is zero, so need to delete this entry */
status = hclge_remove_mac_vlan_tbl(vport, &req);
- else
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ }
} else if (status == -ENOENT) {
status = 0;
}
@@@ -9595,7 -9391,7 +9572,7 @@@ int hclge_update_mac_node_for_dev_addr(
return 0;
}
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
@@@ -13233,7 -13029,7 +13210,7 @@@ static int hclge_init(void
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
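
The hclge changes above drop the IRQ affinity notifier and switch every
mod_delayed_work_on() call to mod_delayed_work() on a WQ_UNBOUND workqueue,
so the scheduler places the service task rather than the misc vector's CPU.
A minimal stand-alone sketch of the resulting pattern, with hypothetical
names (not part of the patch):

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static struct delayed_work example_task;

	static void example_task_fn(struct work_struct *work)
	{
		/* service work; WQ_UNBOUND lets it run on any CPU */
	}

	static int example_init(void)
	{
		example_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "example");
		if (!example_wq)
			return -ENOMEM;

		INIT_DELAYED_WORK(&example_task, example_task_fn);
		/* replaces mod_delayed_work_on(cpumask_first(mask), ...) */
		mod_delayed_work(example_wq, &example_task, 0);
		return 0;
	}
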
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 4f8403af84be,69cd8f87b4c8..9e1eede599ec
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@@ -403,13 -403,8 +403,13 @@@ struct hclge_tm_info
u8 pfc_en; /* PFC enabled or not for user priority */
};
+/* max number of mac statistics in each hardware version */
+#define HCLGE_MAC_STATS_MAX_NUM_V1 84
+#define HCLGE_MAC_STATS_MAX_NUM_V2 105
+
struct hclge_comm_stats_str {
char desc[ETH_GSTRING_LEN];
+ u32 stats_num;
unsigned long offset;
};
@@@ -417,7 -412,6 +417,7 @@@
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
u64 mac_rx_mac_pause_num;
+ u64 rsv0;
u64 mac_tx_pfc_pri0_pkt_num;
u64 mac_tx_pfc_pri1_pkt_num;
u64 mac_tx_pfc_pri2_pkt_num;
@@@ -454,7 -448,7 +454,7 @@@
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 rsv0;
+ u64 rsv1;
u64 mac_tx_8192_9216_oct_pkt_num;
u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
@@@ -481,7 -475,7 +481,7 @@@
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 rsv1;
+ u64 rsv2;
u64 mac_rx_8192_9216_oct_pkt_num;
u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
@@@ -504,28 -498,6 +504,28 @@@
u64 mac_rx_pfc_pause_pkt_num;
u64 mac_tx_ctrl_pkt_num;
u64 mac_rx_ctrl_pkt_num;
+
+ /* duration of pfc */
+ u64 mac_tx_pfc_pri0_xoff_time;
+ u64 mac_tx_pfc_pri1_xoff_time;
+ u64 mac_tx_pfc_pri2_xoff_time;
+ u64 mac_tx_pfc_pri3_xoff_time;
+ u64 mac_tx_pfc_pri4_xoff_time;
+ u64 mac_tx_pfc_pri5_xoff_time;
+ u64 mac_tx_pfc_pri6_xoff_time;
+ u64 mac_tx_pfc_pri7_xoff_time;
+ u64 mac_rx_pfc_pri0_xoff_time;
+ u64 mac_rx_pfc_pri1_xoff_time;
+ u64 mac_rx_pfc_pri2_xoff_time;
+ u64 mac_rx_pfc_pri3_xoff_time;
+ u64 mac_rx_pfc_pri4_xoff_time;
+ u64 mac_rx_pfc_pri5_xoff_time;
+ u64 mac_rx_pfc_pri6_xoff_time;
+ u64 mac_rx_pfc_pri7_xoff_time;
+
+ /* duration of pause */
+ u64 mac_tx_pause_xoff_time;
+ u64 mac_rx_pause_xoff_time;
};
#define HCLGE_STATS_TIMER_INTERVAL 300UL
@@@ -966,15 -938,12 +966,14 @@@ struct hclge_dev
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
struct devlink *devlink;
};
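
The new stats_num member in hclge_comm_stats_str, together with the V1/V2
maximums above, presumably lets one descriptor table serve both hardware
generations: an entry is only valid when the device exposes at least
stats_num counters. A sketch of that filtering idea, with hypothetical
entries:

	struct stats_desc {
		const char *name;
		u32 stats_num;	/* table size the entry first appears in */
		unsigned long offset;
	};

	static const struct stats_desc descs[] = {
		{ "mac_tx_mac_pause_num",   84,  0 },	/* V1 and V2 */
		{ "mac_tx_pause_xoff_time", 105, 8 },	/* V2 only */
	};

	static int num_valid_stats(u32 hw_stats_num)
	{
		int i, n = 0;

		for (i = 0; i < ARRAY_SIZE(descs); i++)
			if (descs[i].stats_num <= hw_stats_num)
				n++;
		return n;
	}
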
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 3306050ad72c,cf00ad7bb881..645b2c0011e6
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@@ -1349,7 -1349,7 +1349,7 @@@ static void hclgevf_get_mac_addr(struc
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@@ -2232,6 -2232,7 +2232,7 @@@ static void hclgevf_get_misc_vector(str
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
&hdev->state))
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@@ -3449,6 -3450,8 +3450,8 @@@ static int hclgevf_init_hdev(struct hcl
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@@ -3899,7 -3902,7 +3902,7 @@@ static int hclgevf_init(void
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
diff --combined drivers/net/ethernet/intel/ice/ice_ptp.c
index a1be0d04a2d0,d1ef3d48a4b0..bf7247c6f58e
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@@ -6,252 -6,6 +6,252 @@@
#define E810_OUT_PROP_DELAY_NS 1
+static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+};
+
+/**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+static int
+ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+{
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration
+ * in the ptp_pins parameter
+ */
+static int
+ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+{
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+}
+
+/**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+static int
+ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+{
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+}
+
+/**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify if the pin supports the requested pin function and check pin consistency.
+ * Reconfigure the SMA logic attached to the given pin to enable its
+ * desired functionality
+ */
+static int
+ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+}
+
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@@ -981,34 -735,17 +981,34 @@@ ice_ptp_gpio_enable_e810(struct ptp_clo
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -1;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@@ -1020,19 -757,7 +1020,19 @@@
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@@ -1287,7 -1012,7 +1287,7 @@@ int ice_ptp_set_ts_config(struct ice_p
* The timestamp is in ns, so we must convert the result first.
*/
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@@ -1312,94 -1037,14 +1312,94 @@@
}
}
+/**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable the OS access to the SMA pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the E810-T SMA control
+ * register.
+ */
+static void
+ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+}
+
+/**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config, and setting it up
+ * according to the current status of the SMA. On failure, disable all of the
+ * extended SMA pin support.
+ */
+static void
+ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+}
+
+/**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+}
+
/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@@ -1417,10 -1062,7 +1417,10 @@@ ice_ptp_set_funcs_e810(struct ice_pf *p
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
@@@ -1929,6 -1571,9 +1929,9 @@@ err_kworker
*/
void ice_ptp_release(struct ice_pf *pf)
{
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
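
ice_get_sma_config_e810t() above decodes one control byte per connector
pair by masking out that pair's bits and switching on the result. A reduced
stand-alone sketch of the decode step, with made-up mask values (the real
ICE_SMA*_E810T definitions live in the ice headers):

	#include <stdio.h>

	#define SMA1_DIR_EN	0x01	/* hypothetical: output enable */
	#define SMA1_TX_EN	0x02	/* hypothetical: input disable */
	#define SMA1_MASK	(SMA1_DIR_EN | SMA1_TX_EN)

	static void decode_sma1(unsigned char data)
	{
		switch (data & SMA1_MASK) {
		case SMA1_MASK:		/* both bits set: pair disabled */
			printf("SMA1/U.FL1 disabled\n");
			break;
		case SMA1_DIR_EN:	/* output mode */
			printf("SMA1 periodic output\n");
			break;
		case SMA1_TX_EN:	/* input mode */
			printf("SMA1 external timestamp\n");
			break;
		case 0:			/* both functions active */
			printf("SMA1 input + U.FL1 output\n");
			break;
		}
	}
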
diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 94d479010410,49d822a98ada..c7fd466a0efd
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@@ -95,7 -95,7 +95,7 @@@ static char *cgx_tx_stats_fields[] =
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
- [CGX_STAT8] = "Packets sent with an octet count of 65���127",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
@@@ -125,7 -125,7 +125,7 @@@ static char *rpm_rx_stats_fields[] =
"Total frames received on interface",
"Packets received with an octet count < 64",
"Packets received with an octet count == 64",
- "Packets received with an octet count of 65������127",
+ "Packets received with an octet count of 65-127",
"Packets received with an octet count of 128-255",
"Packets received with an octet count of 256-511",
"Packets received with an octet count of 512-1023",
@@@ -164,7 -164,7 +164,7 @@@ static char *rpm_tx_stats_fields[] =
"Packets sent to the multicast DMAC",
"Packets sent to a broadcast DMAC",
"Packets sent with an octet count == 64",
- "Packets sent with an octet count of 65������127",
+ "Packets sent with an octet count of 65-127",
"Packets sent with an octet count of 128-255",
"Packets sent with an octet count of 256-511",
"Packets sent with an octet count of 512-1023",
@@@ -226,108 -226,85 +226,175 @@@ static const struct file_operations rvu
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int index = 0, off = 0;
+ int bytes_not_copied;
+ int buf_size = 10240;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to set up lmt map table mapping!\n");
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+ index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
+ static void get_lf_str_list(struct rvu_block block, int pcifunc,
+ char *lfs)
+ {
+ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+ if (lf >= block.lf.max)
+ break;
+
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+
+ if (lf == prev_lf + 1) {
+ prev_lf = lf;
+ seq = 1;
+ continue;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+ else
+ len += (len ? sprintf(lfs + len, ",%d", lf) :
+ sprintf(lfs + len, "%d", lf));
+
+ prev_lf = lf;
+ seq = 0;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d", prev_lf);
+
+ lfs[len] = '\0';
+ }
+
+ static int get_max_column_width(struct rvu *rvu)
+ {
+ int index, pf, vf, lf_str_size = 12, buf_size = 256;
+ struct rvu_block block;
+ u16 pcifunc;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+
+ get_lf_str_list(block, pcifunc, buf);
+ if (lf_str_size <= strlen(buf))
+ lf_str_size = strlen(buf) + 1;
+ }
+ }
+ }
+
+ kfree(buf);
+ return lf_str_size;
+ }
+
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
- int index, off = 0, flag = 0, go_back = 0, len = 0;
+ int index, off = 0, flag = 0, len = 0, i = 0;
struct rvu *rvu = filp->private_data;
- int lf, pf, vf, pcifunc;
+ int bytes_not_copied = 0;
struct rvu_block block;
- int bytes_not_copied;
- int lf_str_size = 12;
+ int pf, vf, pcifunc;
int buf_size = 2048;
+ int lf_str_size;
char *lfs;
char *buf;
@@@ -339,6 -316,9 +406,9 @@@
if (!buf)
return -ENOSPC;
+ /* Get the maximum width of a column */
+ lf_str_size = get_max_column_width(rvu);
+
lfs = kzalloc(lf_str_size, GFP_KERNEL);
if (!lfs) {
kfree(buf);
@@@ -352,65 -332,69 +422,69 @@@
"%-*s", lf_str_size,
rvu->hw->block[index].name);
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ off = 0;
+ flag = 0;
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
if (vf) {
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
} else {
sprintf(lfs, "PF%d", pf);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
}
- off += go_back;
- for (index = 0; index < BLKTYPE_MAX; index++) {
+ for (index = 0; index < BLK_COUNT; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
len = 0;
lfs[len] = '\0';
- for (lf = 0; lf < block.lf.max; lf++) {
- if (block.fn_map[lf] != pcifunc)
- continue;
+ get_lf_str_list(block, pcifunc, lfs);
+ if (strlen(lfs))
flag = 1;
- len += sprintf(&lfs[len], "%d,", lf);
- }
- if (flag)
- len--;
- lfs[len] = '\0';
off += scnprintf(&buf[off], buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
- if (!strlen(lfs))
- go_back += lf_str_size;
}
- if (!flag)
- off -= go_back;
- else
- flag = 0;
- off--;
- off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ if (flag) {
+ off += scnprintf(&buf[off],
+ buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer +
+ (i * off),
+ buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ }
}
}
- bytes_not_copied = copy_to_user(buffer, buf, off);
+ out:
kfree(lfs);
kfree(buf);
-
if (bytes_not_copied)
return -EFAULT;
- *ppos = off;
- return off;
+ return *ppos;
}
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
@@@ -594,7 -578,7 +668,7 @@@ static ssize_t rvu_dbg_qsize_write(stru
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
@@@ -1809,6 -1793,10 +1883,10 @@@ static int rvu_dbg_nix_band_prof_ctx_di
u16 pcifunc;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
@@@ -1858,6 -1846,10 +1936,10 @@@ static int rvu_dbg_nix_band_prof_rsrc_d
int layer;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
seq_puts(m, "\nBandwidth profile resource free count\n");
seq_puts(m, "=====================================\n");
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
@@@ -1968,7 -1960,7 +2050,7 @@@ static int cgx_print_stats(struct seq_f
return -ENODEV;
mac_ops = get_mac_ops(cgxd);
-
+ /* There can be no CGX devices at all */
if (!mac_ops)
return 0;
@@@ -2046,13 -2038,13 +2128,13 @@@
if (err)
return err;
- if (is_rvu_otx2(rvu))
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
- tx_stat);
- else
- seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
- tx_stat);
- stat++;
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
}
return err;
@@@ -2490,8 -2482,6 +2572,8 @@@ static int rvu_dbg_npc_mcam_show_rules(
seq_printf(s, "VF%d", vf);
}
seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
}
rvu_dbg_npc_mcam_show_action(s, iter);
@@@ -2764,10 -2754,6 +2846,10 @@@ void rvu_dbg_init(struct rvu *rvu
debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
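
get_lf_str_list() above compresses the LFs owned by a pcifunc into ranges
("0-3,7") instead of the old one-number-per-LF list. The same idea on a
plain sorted array, as a stand-alone sketch rather than the kernel helper:

	#include <stdio.h>

	/* print a sorted index list as ranges, e.g. "0-3,7,9-10" */
	static void print_ranges(const int *idx, int n)
	{
		int i = 0, first = 1;

		while (i < n) {
			int start = idx[i];

			while (i + 1 < n && idx[i + 1] == idx[i] + 1)
				i++;
			printf("%s%d", first ? "" : ",", start);
			if (idx[i] != start)
				printf("-%d", idx[i]);
			first = 0;
			i++;
		}
		printf("\n");
	}

	int main(void)
	{
		int lfs[] = { 0, 1, 2, 3, 7, 9, 10 };

		print_ranges(lfs, 7);	/* prints 0-3,7,9-10 */
		return 0;
	}
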
diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 7761dcf17b91,6970540dc470..d8b1948aaa0a
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@@ -28,7 -28,6 +28,7 @@@ static int nix_verify_bandprof(struct n
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
+static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@@ -1062,68 -1061,10 +1062,68 @@@ static int rvu_nix_blk_aq_enq_inst(stru
return 0;
}
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make copy of original context & mask which are required
+ * for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* Context mask (cq_mask) holds mask value of fields which
+ * are changed in AQ WRITE operation.
+ * for example cq.drop = 0xa;
+ * cq_mask.drop = 0xff;
+ * The logic below performs '&' between cq and cq_mask so that
+ * non-updated fields are masked out of both the request and the
+ * response before comparison
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
+
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
+ int err, retries = 5;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
@@@ -1134,24 -1075,7 +1134,24 @@@
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
+ * As a workaround, perform a CQ context read after each AQ write. If the
+ * read shows the write did not take effect, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
}
static const char *nix_get_ctx_name(int ctype)
@@@ -2583,6 -2507,9 +2583,9 @@@ static void nix_free_tx_vtag_entries(st
return;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);
@@@ -4512,17 -4439,10 +4515,17 @@@ int rvu_mbox_handler_nix_lf_stop_rx(str
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
@@@ -4559,33 -4479,9 +4562,33 @@@
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Disabling CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@@ -4686,119 -4582,6 +4689,119 @@@ int rvu_mbox_handler_nix_lso_format_cfg
return 0;
}
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ val = 0;
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
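
nix_inline_ipsec_cfg() above builds its 64-bit register values with
GENMASK_ULL()/FIELD_PREP() rather than hand-coded shifts, and RX_SA_BASE is
read back with FIELD_GET(). A stand-alone sketch of the pattern, with a
made-up field layout:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define CFG_EGRP	GENMASK_ULL(50, 48)	/* hypothetical */
	#define CFG_OPCODE	GENMASK_ULL(47, 32)
	#define CFG_PARAM	GENMASK_ULL(31, 0)

	static u64 pack_cfg(u8 egrp, u16 opcode, u32 param)
	{
		u64 val = 0;

		/* FIELD_PREP shifts the value into the mask's position */
		val |= FIELD_PREP(CFG_EGRP, egrp);
		val |= FIELD_PREP(CFG_OPCODE, opcode);
		val |= FIELD_PREP(CFG_PARAM, param);
		return val;
	}

	static u16 unpack_opcode(u64 val)
	{
		/* FIELD_GET is the inverse operation */
		return FIELD_GET(CFG_OPCODE, val);
	}
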
diff --combined drivers/net/ethernet/microchip/lan743x_main.c
index 03d02403c19e,4d5a5d6595b3..4fc97823bc84
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@@ -816,7 -816,7 +816,7 @@@ static int lan743x_mac_init(struct lan7
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@@ -1743,6 -1743,16 +1743,16 @@@ static int lan743x_tx_ring_init(struct
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
@@@ -1934,7 -1944,8 +1944,8 @@@ static void lan743x_rx_update_tail(stru
index);
}
- static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
+ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ gfp_t gfp)
{
struct net_device *netdev = rx->adapter->netdev;
struct device *dev = &rx->adapter->pdev->dev;
@@@ -1948,7 -1959,7 +1959,7 @@@
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+ skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
if (!skb)
return -ENOMEM;
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
@@@ -2110,7 -2121,8 +2121,8 @@@ static int lan743x_rx_process_buffer(st
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head,
+ GFP_ATOMIC | GFP_DMA)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@@ -2276,6 -2288,16 +2288,16 @@@ static int lan743x_rx_ring_init(struct
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@@@ -2315,13 -2337,16 +2337,16 @@@
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_init_ring_element(rx, index);
+ ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
if (ret)
goto cleanup;
}
return 0;
cleanup:
+ netif_warn(rx->adapter, ifup, rx->adapter->netdev,
+ "Error allocating memory for LAN743x\n");
+
lan743x_rx_ring_cleanup(rx);
return ret;
}
@@@ -2645,7 -2670,7 +2670,7 @@@ static int lan743x_netdev_set_mac_addre
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
@@@ -3019,6 -3044,8 +3044,8 @@@ static int lan743x_pm_resume(struct dev
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
}
/* open netdev when netdev is at running state while resume.
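
The lan743x ring-init paths above gain the usual DMA-mask negotiation: try
a 64-bit mask and fall back to 32 bits before allocating descriptors. The
canonical shape of that check, sketched stand-alone:

	#include <linux/dma-mapping.h>

	static int example_set_dma_mask(struct device *dev)
	{
		/* prefer 64-bit DMA; fall back to 32-bit addressing */
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
			return 0;
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
			return 0;

		dev_warn(dev, "no suitable DMA mask available\n");
		return -ENOMEM;
	}
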
diff --combined drivers/net/ethernet/nxp/lpc_eth.c
index a63cc295b979,c910fa2f40a4..bc39558fe82b
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@@ -419,7 -419,7 +419,7 @@@ struct netdata_local
/*
* MAC support functions
*/
-static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@@ -1015,9 -1015,6 +1015,6 @@@ static int lpc_eth_close(struct net_dev
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
-
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@@@ -1025,6 -1022,8 +1022,8 @@@
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;
@@@ -1093,7 -1092,7 +1092,7 @@@ static int lpc_set_mac_address(struct n
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@@ -1232,7 -1231,6 +1231,7 @@@ static int lpc_eth_drv_probe(struct pla
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@@ -1348,11 -1346,10 +1347,11 @@@
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
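
Several drivers in this merge (lpc_eth, r8169, lan78xx, usbnet, vmxnet3,
xen-netfront) follow the same conversion: netdev->dev_addr is becoming
const, so the MAC is read into a local buffer and installed with
eth_hw_addr_set() instead of being written in place. The shape of the
conversion, sketched with a hypothetical read helper:

	#include <linux/etherdevice.h>

	/* hypothetical helper: reads the MAC from device registers */
	void example_read_hw_mac(struct net_device *ndev, u8 *addr);

	static void example_init_mac(struct net_device *ndev)
	{
		u8 addr[ETH_ALEN];

		example_read_hw_mac(ndev, addr);
		if (!is_valid_ether_addr(addr))
			eth_random_addr(addr);

		/* dev_addr is becoming const; no more direct memcpy() */
		eth_hw_addr_set(ndev, addr);
	}
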
diff --combined drivers/net/ethernet/realtek/r8169_main.c
index ee6c9c842012,2918947dd57c..bbe21db20417
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@@ -118,6 -118,7 +118,6 @@@ static const struct
[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
[RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
[RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
@@@ -156,6 -157,7 +156,7 @@@ static const struct pci_device_id rtl81
{ PCI_VDEVICE(REALTEK, 0x8129) },
{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
{ PCI_VDEVICE(REALTEK, 0x8161) },
+ { PCI_VDEVICE(REALTEK, 0x8162) },
{ PCI_VDEVICE(REALTEK, 0x8167) },
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
@@@ -984,6 -986,33 +985,6 @@@ DECLARE_RTL_COND(rtl_ocpar_cond
return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
}
-static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
-{
- RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
- RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
- RTL_W32(tp, EPHY_RXER_NUM, 0);
-
- rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
-}
-
-static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
-{
- r8168dp_1_mdio_access(tp, reg,
- OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
-}
-
-static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
-{
- r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
-
- mdelay(1);
- RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
- RTL_W32(tp, EPHY_RXER_NUM, 0);
-
- return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
- RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
-}
-
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
@@@ -1025,6 -1054,9 +1026,6 @@@ static int r8168dp_2_mdio_read(struct r
static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- r8168dp_1_mdio_write(tp, location, val);
- break;
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
@@@ -1041,6 -1073,8 +1042,6 @@@
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- return r8168dp_1_mdio_read(tp, location);
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
@@@ -1202,6 -1236,7 +1203,6 @@@ static bool r8168ep_check_dash(struct r
static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
@@@ -2006,7 -2041,8 +2007,7 @@@ static enum mac_version rtl8169_get_mac
/* 8168DP family. */
/* It seems this early RTL8168dp version never made it to
- * the wild. Let's see whether somebody complains, if not
- * we'll remove support for this chip version completely.
+ * the wild. Support has been removed.
* { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
*/
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
@@@ -2336,7 -2372,7 +2337,7 @@@ static void rtl_jumbo_config(struct rtl
r8168c_hw_jumbo_disable(tp);
}
break;
- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_28:
if (jumbo)
r8168dp_hw_jumbo_enable(tp);
else
@@@ -3684,6 -3720,7 +3685,6 @@@ static void rtl_hw_config(struct rtl816
[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
- [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
@@@ -3946,6 -3983,7 +3947,6 @@@ static void rtl8169_cleanup(struct rtl8
goto no_reset;
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
@@@ -5217,7 -5255,7 +5218,7 @@@ static int rtl_get_ether_clk(struct rtl
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
- u8 *mac_addr = dev->dev_addr;
+ u8 mac_addr[ETH_ALEN];
int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@@ -5235,7 -5273,6 +5236,7 @@@
eth_hw_addr_random(dev);
dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
+ eth_hw_addr_set(dev, mac_addr);
rtl_rar_set(tp, mac_addr);
}
diff --combined drivers/net/usb/lan78xx.c
index 03319fdb5235,63cd72c5f580..f20376c1ef3f
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@@ -1817,7 -1817,7 +1817,7 @@@ static void lan78xx_init_mac_address(st
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr);
+ eth_hw_addr_set(dev->net, addr);
}
/* MDIO read and write wrappers for phylib */
@@@ -2416,7 -2416,7 +2416,7 @@@ static int lan78xx_set_mac_addr(struct
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] |
netdev->dev_addr[1] << 8 |
@@@ -4122,6 -4122,12 +4122,12 @@@ static int lan78xx_probe(struct usb_int
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */
+ if (dev->maxpacket == 0) {
+ ret = -ENODEV;
+ goto out4;
+ }
+
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
diff --combined drivers/net/usb/usbnet.c
index 350bae673ed4,a33d7fb82a00..9a6450f796dc
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@@ -165,13 -165,12 +165,13 @@@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints)
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
+ u8 addr[ETH_ALEN];
int tmp = -1, ret;
unsigned char buf [13];
ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
if (ret == 12)
- tmp = hex2bin(dev->net->dev_addr, buf, 6);
+ tmp = hex2bin(addr, buf, 6);
if (tmp < 0) {
dev_dbg(&dev->udev->dev,
"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
@@@ -179,7 -178,6 +179,7 @@@
ret = -EINVAL;
return ret;
}
+ eth_hw_addr_set(dev->net, addr);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@@ -1728,7 -1726,7 +1728,7 @@@ usbnet_probe (struct usb_interface *ude
dev->net = net;
strscpy(net->name, "usb%d", sizeof(net->name));
- memcpy (net->dev_addr, node_id, sizeof node_id);
+ eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes;
* bind() should set rx_urb_size in that case.
@@@ -1792,6 -1790,7 +1792,7 @@@
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
if (dev->maxpacket == 0) {
/* that is a broken device */
+ status = -ENODEV;
goto out4;
}
diff --combined drivers/net/vmxnet3/vmxnet3_drv.c
index 3e1b7746cce4,8799854bacb2..14fae317bc70
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@@ -46,7 -46,7 +46,7 @@@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_
static int enable_mq = 1;
static void
-vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
/*
* Enable/Disable the given intr
@@@ -2806,7 -2806,7 +2806,7 @@@ vmxnet3_quiesce_dev(struct vmxnet3_adap
static void
-vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
{
u32 tmp;
@@@ -2824,7 -2824,7 +2824,7 @@@ vmxnet3_set_mac_addr(struct net_device
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_addr_set(netdev, addr->sa_data);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
@@@ -3638,7 -3638,7 +3638,7 @@@ vmxnet3_probe_device(struct pci_dev *pd
#endif
vmxnet3_read_mac_addr(adapter, mac);
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ dev_addr_set(netdev, mac);
netdev->netdev_ops = &vmxnet3_netdev_ops;
vmxnet3_set_ethtool_ops(netdev);
@@@ -3833,7 -3833,6 +3833,6 @@@ vmxnet3_suspend(struct device *device
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
- netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
diff --combined drivers/net/xen-netfront.c
index 57437e4b8a94,fc41ba95f81d..911f43986a8c
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@@ -1730,6 -1730,10 +1730,10 @@@ static int netfront_resume(struct xenbu
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev);
+ netif_device_detach(info->netdev);
+ netif_tx_unlock_bh(info->netdev);
+
xennet_disconnect_backend(info);
return 0;
}
@@@ -2157,7 -2161,6 +2161,7 @@@ static int talk_to_netback(struct xenbu
unsigned int max_queues = 0;
struct netfront_queue *queue = NULL;
unsigned int num_queues = 1;
+ u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@@ -2171,12 -2174,11 +2175,12 @@@
"feature-split-event-channels", 0);
/* Read mac addr. */
- err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ err = xen_net_read_mac(dev, addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
goto out_unlocked;
}
+ eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
"feature-xdp-headroom", 0);
@@@ -2351,6 -2353,10 +2355,10 @@@ static int xennet_connect(struct net_de
* domain a kick because we've probably just requeued some
* packets.
*/
+ netif_tx_lock_bh(np->netdev);
+ netif_device_attach(np->netdev);
+ netif_tx_unlock_bh(np->netdev);
+
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
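
xen-netfront now detaches the device under the TX lock on resume/suspend
and re-attaches it the same way on reconnect, so an in-flight start_xmit
cannot race the detach. The pairing, sketched:

	#include <linux/netdevice.h>

	static void example_disconnect(struct net_device *dev)
	{
		/* hold the TX lock so xmit sees the detach atomically */
		netif_tx_lock_bh(dev);
		netif_device_detach(dev);
		netif_tx_unlock_bh(dev);
	}

	static void example_reconnect(struct net_device *dev)
	{
		netif_tx_lock_bh(dev);
		netif_device_attach(dev);
		netif_tx_unlock_bh(dev);
	}
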
diff --combined include/linux/bpf.h
index 1c7fd7c4c6d3,3db6f6c95489..e6f5579f9356
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@@ -48,7 -48,6 +48,7 @@@ extern struct idr btf_idr
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@@ -143,8 -142,7 +143,8 @@@ struct bpf_map_ops
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
- int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ int (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */
@@@ -931,8 -929,11 +931,11 @@@ struct bpf_array_aux
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
- enum bpf_prog_type type;
- bool jited;
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ } owner;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@@ -1091,7 -1092,6 +1094,7 @@@ bool bpf_prog_array_compatible(struct b
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@@ -2220,8 -2220,6 +2223,8 @@@ int bpf_arch_text_poke(void *ip, enum b
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+#define MAX_BPRINTF_VARARGS 12
+
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
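
The bpf_callback_t typedef above gives map-iteration callbacks a real function-pointer type, replacing void * plus the BPF_CAST_CALL macro (removed from include/linux/filter.h below). A sketch of the shape a map_for_each_callback implementation now takes — example_for_each() is illustrative; the signature follows the ops change above:

#include <linux/types.h>

struct bpf_map;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);  /* as above */

static int example_for_each(struct bpf_map *map, bpf_callback_t callback_fn,
                            void *callback_ctx, u64 flags)
{
        u64 key = 0, val = 0;

        /* Typed call, no cast macro; 0 means continue, 1 means stop. */
        return callback_fn((u64)(long)map, (u64)(long)&key,
                           (u64)(long)&val, (u64)(long)callback_ctx, 0);
}
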
diff --combined include/linux/filter.h
index 47f80adbe744,ef03ff34234d..8231a6a257f6
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@@ -360,9 -360,10 +360,9 @@@ static inline bool insn_is_zext(const s
.off = 0, \
.imm = TGT })
-/* Function call */
+/* Convert function address to BPF immediate */
-#define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@@ -370,7 -371,7 +370,7 @@@
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
@@@ -1050,6 -1051,7 +1050,7 @@@ extern int bpf_jit_enable
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+ extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
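
BPF_CALL_IMM() names the encoding that BPF_EMIT_CALL used to open-code: a helper call carries the function's offset from __bpf_call_base in its 32-bit immediate. A sketch of the decode side, assuming the usual recovery by adding the base back (example_resolve() is hypothetical):

#include <linux/filter.h>

typedef u64 (*example_fn_t)(u64, u64, u64, u64, u64);

static example_fn_t example_resolve(const struct bpf_insn *insn)
{
        /* Inverse of BPF_CALL_IMM(): imm is the helper's offset from
         * __bpf_call_base, so adding the base back yields the address.
         */
        return (example_fn_t)((void *)__bpf_call_base + insn->imm);
}
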
diff --combined include/net/cfg80211.h
index 7c9d5db4f0e6,27336fc70467..423f97b982ff
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@@ -739,22 -739,6 +739,22 @@@ struct cfg80211_tid_config
struct cfg80211_tid_cfg tid_conf[];
};
+/**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+};
+
/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
@@@ -1056,36 -1040,6 +1056,36 @@@ struct cfg80211_crypto_settings
enum nl80211_sae_pwe_mechanism sae_pwe;
};
+/**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+};
+
+/**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+};
+
/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
@@@ -1104,7 -1058,6 +1104,7 @@@
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@@ -1122,7 -1075,6 +1122,7 @@@ struct cfg80211_beacon_data
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@@ -1237,7 -1189,6 +1237,7 @@@ enum cfg80211_ap_settings_flags
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@@ -1270,7 -1221,6 +1270,7 @@@
struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
/**
@@@ -4068,10 -4018,6 +4068,10 @@@ struct mgmt_frame_regs
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@@ -4402,8 -4348,6 +4402,8 @@@
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
};
/*
@@@ -5037,13 -4981,6 +5037,13 @@@ struct wiphy_iftype_akm_suites
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
*/
struct wiphy {
struct mutex mtx;
@@@ -5188,9 -5125,6 +5188,9 @@@
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@@ -5442,7 -5376,6 +5442,6 @@@ static inline void wiphy_unlock(struct
* netdev and may otherwise be used by driver read-only, will be update
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers
@@@ -5489,7 -5422,6 +5488,6 @@@ struct wireless_dev
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
u8 mgmt_registrations_need_update:1;
struct mutex mtx;
@@@ -5558,7 -5490,7 +5556,7 @@@
unsigned long unprot_beacon_reported;
};
-static inline u8 *wdev_address(struct wireless_dev *wdev)
+static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@@ -6376,17 -6308,6 +6374,17 @@@ static inline void cfg80211_gen_new_bss
u64_to_ether_addr(new_bssid_u64, new_bssid);
}
+/**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
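
struct cfg80211_mbssid_elems added above ends in a flexible array, so callers size the allocation for the element count up front. A sketch of how such an allocation might look — example_alloc_mbssid() is hypothetical; struct_size() is the stock helper for this layout:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <net/cfg80211.h>

static struct cfg80211_mbssid_elems *example_alloc_mbssid(u8 cnt)
{
        struct cfg80211_mbssid_elems *elems;

        /* One allocation covering the header plus cnt elem[] entries. */
        elems = kzalloc(struct_size(elems, elem, cnt), GFP_KERNEL);
        if (elems)
                elems->cnt = cnt;
        return elems;
}
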
diff --combined include/net/mptcp.h
index f83fa48408b3,3214848402ec..a925349b4b89
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@@ -12,8 -12,6 +12,8 @@@
#include <linux/tcp.h>
#include <linux/types.h>
+struct mptcp_info;
+struct mptcp_sock;
struct seq_file;
/* MPTCP sk_buff extension data */
@@@ -71,6 -69,10 +71,10 @@@ struct mptcp_out_options
struct {
u64 sndr_key;
u64 rcvr_key;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ __sum16 csum;
};
struct {
struct mptcp_addr_info addr;
@@@ -123,8 -125,6 +127,8 @@@ bool mptcp_incoming_options(struct soc
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts);
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);
+
/* move the skb extension owership, with the assumption that 'to' is
* newly allocated
*/
diff --combined include/net/sock.h
index fb70d8553fae,463f390d90b3..620de053002d
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@@ -259,11 -259,10 +259,11 @@@ struct bpf_local_storage
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@@ -271,7 -270,6 +271,7 @@@
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@@ -331,6 -329,7 +331,6 @@@
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@@ -395,6 -394,7 +395,6 @@@ struct sock
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@@ -413,7 -413,6 +413,7 @@@
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@@ -432,9 -431,6 +432,9 @@@
struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@@ -447,6 -443,7 +447,6 @@@
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@@ -1210,16 -1207,13 +1210,16 @@@ struct proto
unsigned int inuse_idx;
#endif
+ int (*forward_alloc_get)(const struct sock *sk);
+
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
@@@ -1243,7 -1237,7 +1243,7 @@@
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@@ -1297,22 -1291,20 +1297,22 @@@ static inline void sk_refcnt_debug_rele
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+static inline int sk_forward_alloc_get(const struct sock *sk)
+{
+ if (!sk->sk_prot->forward_alloc_get)
+ return sk->sk_forward_alloc;
+
+ return sk->sk_prot->forward_alloc_get(sk);
+}
+
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
return false;
-#ifdef CONFIG_INET
- return sk->sk_prot->stream_memory_free ?
- INDIRECT_CALL_1(sk->sk_prot->stream_memory_free,
- tcp_stream_memory_free,
- sk, wake) : true;
-#else
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk, wake) : true;
-#endif
+ INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
+ tcp_stream_memory_free, sk, wake) : true;
}
static inline bool sk_stream_memory_free(const struct sock *sk)
@@@ -1526,49 -1518,20 +1526,49 @@@ sk_rmem_schedule(struct sock *sk, struc
skb_pfmemalloc(skb);
}
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+}
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@@ -1578,19 -1541,11 +1578,19 @@@
sk->sk_forward_alloc -= size;
}
+/* the following macros control memory reclaiming in sk_mem_uncharge()
+ */
+#define SK_RECLAIM_THRESHOLD (1 << 21)
+#define SK_RECLAIM_CHUNK (1 << 20)
+
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@@ -1599,14 -1554,22 +1599,14 @@@
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
- __sk_mem_reclaim(sk, 1 << 20);
+ if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
+ __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK);
}
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
__kfree_skb(skb);
}
@@@ -1926,8 -1889,10 +1926,8 @@@ static inline void sk_rx_queue_set(stru
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
@@@ -1935,19 -1900,15 +1935,19 @@@
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
static inline int sk_rx_queue_get(const struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
#endif
return -1;
@@@ -2427,11 -2388,13 +2427,11 @@@ static inline void sk_stream_moderate_s
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
@@@ -2645,6 -2608,7 +2645,6 @@@ static inline void skb_setup_tx_timesta
&skb_shinfo(skb)->tskey);
}
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@@ -2656,6 -2620,12 +2656,6 @@@
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@@ -2850,8 -2820,10 +2850,14 @@@ void sock_set_sndtimeo(struct sock *sk
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+ static inline bool sk_is_readable(struct sock *sk)
+ {
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+ }
#endif /* _SOCK_H */
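
With ->stream_memory_read renamed to ->sock_is_readable and wrapped by the new sk_is_readable(), any protocol can supply its own readability test (TLS wires in tls_sw_sock_is_readable() below). A sketch of the hookup, with example_* names purely illustrative:

#include <net/sock.h>

static bool example_sock_is_readable(struct sock *sk)
{
        return !skb_queue_empty_lockless(&sk->sk_receive_queue);
}

static struct proto example_proto = {
        .name             = "EXAMPLE",
        .sock_is_readable = example_sock_is_readable,
};
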
diff --combined include/net/tls.h
index adab19a8aed7,1fffb206f09f..526cb2c3b724
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -66,7 -66,7 +66,7 @@@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@@ -74,7 -74,6 +74,7 @@@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+#define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
@@@ -221,8 -220,6 +221,8 @@@ union tls_crypto_context
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@@ -361,6 -358,7 +361,7 @@@ int tls_sk_query(struct sock *sk, int o
int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
+ void tls_err_abort(struct sock *sk, int err);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
@@@ -378,7 -376,7 +379,7 @@@ void tls_sw_release_resources_rx(struc
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
- bool tls_sw_stream_read(const struct sock *sk);
+ bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
@@@ -469,12 -467,6 +470,6 @@@ static inline bool tls_is_sk_tx_device_
#endif
}
- static inline void tls_err_abort(struct sock *sk, int err)
- {
- sk->sk_err = err;
- sk_error_report(sk);
- }
-
static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
int i;
@@@ -515,7 -507,7 +510,7 @@@ static inline void tls_advance_record_s
struct cipher_context *ctx)
{
if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
if (prot->version != TLS_1_3_VERSION &&
prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
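
tls_err_abort() moves out of line and its callers now pass negative errno values (-EBADMSG above). A plausible body, assuming it keeps the old inline behaviour plus a sign sanity check — a sketch, not necessarily the committed implementation:

#include <net/sock.h>

void tls_err_abort(struct sock *sk, int err)
{
        WARN_ON_ONCE(err >= 0);
        /* sk->sk_err carries a positive error code. */
        sk->sk_err = -err;
        sk_error_report(sk);
}
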
diff --combined kernel/bpf/arraymap.c
index 5e1ccfae916b,447def540544..c7a5be3bf8be
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@@ -645,7 -645,7 +645,7 @@@ static const struct bpf_iter_seq_info i
.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};
-static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags)
{
u32 i, key, num_elems = 0;
@@@ -668,8 -668,9 +668,8 @@@
val = array->value + array->elem_size * i;
num_elems++;
key = i;
- ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
- (u64)(long)&key, (u64)(long)val,
- (u64)(long)callback_ctx, 0);
+ ret = callback_fn((u64)(long)map, (u64)(long)&key,
+ (u64)(long)val, (u64)(long)callback_ctx, 0);
/* return value: 0 - continue, 1 - stop and return */
if (ret)
break;
@@@ -1071,6 -1072,7 +1071,7 @@@ static struct bpf_map *prog_array_map_a
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex);
+ spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr);
if (IS_ERR(map)) {
diff --combined kernel/bpf/core.c
index ea8a468dbded,6e3ae90ad107..ded9163185d1
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@@ -524,6 -524,7 +524,7 @@@ int bpf_jit_enable __read_mostly = IS
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
+ long bpf_jit_limit_max __read_mostly;
static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
@@@ -817,7 -818,8 +818,8 @@@ u64 __weak bpf_jit_alloc_exec_limit(voi
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
PAGE_SIZE), LONG_MAX);
return 0;
}
@@@ -1821,20 -1823,26 +1823,26 @@@ static unsigned int __bpf_prog_ret0_war
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
+ bool ret;
+
if (fp->kprobe_override)
return false;
- if (!array->aux->type) {
+ spin_lock(&array->aux->owner.lock);
+
+ if (!array->aux->owner.type) {
/* There's no owner yet where we could check for
* compatibility.
*/
- array->aux->type = fp->type;
- array->aux->jited = fp->jited;
- return true;
+ array->aux->owner.type = fp->type;
+ array->aux->owner.jited = fp->jited;
+ ret = true;
+ } else {
+ ret = array->aux->owner.type == fp->type &&
+ array->aux->owner.jited == fp->jited;
}
-
- return array->aux->type == fp->type &&
- array->aux->jited == fp->jited;
+ spin_unlock(&array->aux->owner.lock);
+ return ret;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@@ -2357,11 -2365,6 +2365,11 @@@ const struct bpf_func_proto * __weak bp
return NULL;
}
+const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
+{
+ return NULL;
+}
+
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 7242b32fff80,17687848daec..2ed9496fc41f
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -254,7 -254,7 +254,7 @@@ batadv_claim_hash_find(struct batadv_pr
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
-batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@@ -336,7 -336,7 +336,7 @@@ batadv_bla_del_backbone_claims(struct b
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
-static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
@@@ -488,7 -488,7 +488,7 @@@ static void batadv_bla_loopdetect_repor
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
-batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@@ -926,7 -926,7 +926,7 @@@ static bool batadv_handle_request(struc
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -964,7 -964,7 +964,7 @@@
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -1560,10 -1560,14 +1560,14 @@@ int batadv_bla_init(struct batadv_priv
return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128);
- bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.claim_hash)
+ return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.backbone_hash) {
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
+ }
batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
@@@ -2126,7 -2130,7 +2130,7 @@@ batadv_bla_claim_dump_entry(struct sk_b
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
@@@ -2294,7 -2298,7 +2298,7 @@@ batadv_bla_backbone_dump_entry(struct s
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
diff --combined net/core/dev.c
index e8754560e641,eb3a366bf212..edeb811c454e
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -140,7 -140,7 +140,7 @@@
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
-#include <linux/netfilter_ingress.h>
+#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
@@@ -303,12 -303,6 +303,12 @@@ static struct netdev_name_node *netdev_
return NULL;
}
+bool netdev_name_in_use(struct net *net, const char *name)
+{
+ return netdev_name_node_lookup(net, name);
+}
+EXPORT_SYMBOL(netdev_name_in_use);
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@@ -1139,7 -1133,7 +1139,7 @@@ static int __dev_alloc_name(struct net
}
snprintf(buf, IFNAMSIZ, name, i);
- if (!__dev_get_by_name(net, buf))
+ if (!netdev_name_in_use(net, buf))
return i;
/* It is possible to run out of possible slots
@@@ -1193,7 -1187,7 +1193,7 @@@ static int dev_get_valid_name(struct ne
if (strchr(name, '%'))
return dev_alloc_name_ns(net, dev, name);
- else if (__dev_get_by_name(net, name))
+ else if (netdev_name_in_use(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
@@@ -1296,8 -1290,8 +1296,8 @@@ rollback
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
- pr_err("%s: name change rollback failed: %d\n",
- dev->name, ret);
+ netdev_err(dev, "name change rollback failed: %d\n",
+ ret);
}
}
@@@ -2351,7 -2345,7 +2351,7 @@@ static void netif_setup_tc(struct net_d
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
+ netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
@@@ -2362,8 -2356,8 +2362,8 @@@
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
- i, q);
+ netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+ i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
@@@ -2927,8 -2921,6 +2927,8 @@@ int netif_set_real_num_tx_queues(struc
if (dev->num_tc)
netif_setup_tc(dev, txq);
+ dev_qdisc_change_real_num_tx(dev, txq);
+
dev->real_num_tx_queues = txq;
if (disabling) {
@@@ -3171,6 -3163,12 +3171,12 @@@ static u16 skb_tx_hash(const struct net
qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
+ if (unlikely(!qcount)) {
+ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
+ sb_dev->name, qoffset, tc);
+ qoffset = 0;
+ qcount = dev->real_num_tx_queues;
+ }
}
if (skb_rx_queue_recorded(skb)) {
@@@ -3416,7 -3414,7 +3422,7 @@@ EXPORT_SYMBOL(__skb_gso_segment)
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
- pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ netdev_err(dev, "hw csum failure\n");
skb_dump(KERN_ERR, skb, true);
dump_stack();
}
@@@ -3914,7 -3912,8 +3920,8 @@@ int dev_loopback_xmit(struct net *net,
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->pkt_type = PACKET_LOOPBACK;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(skb));
skb_dst_force(skb);
netif_rx_ni(skb);
@@@ -3926,7 -3925,6 +3933,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit)
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
+#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
struct tcf_result cl_res;
@@@ -3962,7 -3960,6 +3969,7 @@@
default:
break;
}
+#endif /* CONFIG_NET_CLS_ACT */
return skb;
}
@@@ -4156,20 -4153,13 +4163,20 @@@ static int __dev_queue_xmit(struct sk_b
qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_at_ingress = 0;
-# ifdef CONFIG_NET_EGRESS
+#endif
+#ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
+ if (nf_hook_egress_active()) {
+ skb = nf_hook_egress(skb, &rc, dev);
+ if (!skb)
+ goto out;
+ }
+ nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
}
-# endif
#endif
/* If device/qdisc don't need skb->dst, release it right now while
* its hot in this cpu cache.
@@@ -5311,7 -5301,6 +5318,7 @@@ skip_taps
if (static_branch_unlikely(&ingress_needed_key)) {
bool another = false;
+ nf_skip_egress(skb, true);
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
&another);
if (another)
@@@ -5319,7 -5308,6 +5326,7 @@@
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
goto out;
}
@@@ -5856,7 -5844,7 +5863,7 @@@ static void gro_normal_one(struct napi_
gro_normal_list(napi);
}
-static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct packet_offload *ptype;
__be16 type = skb->protocol;
@@@ -5885,11 -5873,12 +5892,11 @@@
if (err) {
WARN_ON(&ptype->list == head);
kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return;
}
out:
gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
- return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@@ -6916,25 -6905,19 +6923,25 @@@ EXPORT_SYMBOL(netif_napi_add)
void napi_disable(struct napi_struct *n)
{
+ unsigned long val, new;
+
might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
- msleep(1);
+ do {
+ val = READ_ONCE(n->state);
+ if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
+ usleep_range(20, 200);
+ continue;
+ }
+
+ new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
+ new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
+ } while (cmpxchg(&n->state, val, new) != val);
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
clear_bit(NAPI_STATE_DISABLE, &n->state);
- clear_bit(NAPI_STATE_THREADED, &n->state);
}
EXPORT_SYMBOL(napi_disable);
@@@ -7012,8 -6995,8 +7019,8 @@@ static int __napi_poll(struct napi_stru
}
if (unlikely(work > weight))
- pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
- n->poll, work, weight);
+ netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+ n->poll, work, weight);
if (likely(work < weight))
return work;
@@@ -8567,7 -8550,8 +8574,7 @@@ static int __dev_set_promiscuity(struc
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
- pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -8637,7 -8621,8 +8644,7 @@@ static int __dev_set_allmulti(struct ne
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
- pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -9174,11 -9159,14 +9181,11 @@@ int dev_get_port_parent_id(struct net_d
}
err = devlink_compat_switch_id_get(dev, ppid);
- if (!err || err != -EOPNOTSUPP)
+ if (!recurse || err != -EOPNOTSUPP)
return err;
- if (!recurse)
- return -EOPNOTSUPP;
-
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ err = dev_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
@@@ -9922,11 -9910,6 +9929,11 @@@ static netdev_features_t netdev_fix_fea
}
}
+ if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
+ netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
+ features &= ~NETIF_F_LRO;
+ }
+
if (features & NETIF_F_HW_TLS_TX) {
bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@@ -10884,7 -10867,7 +10891,7 @@@ struct net_device *alloc_netdev_mqs(in
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev);
+ nf_hook_netdev_init(dev);
return dev;
@@@ -11170,7 -11153,7 +11177,7 @@@ int __dev_change_net_namespace(struct n
* we can use it in the destination network namespace.
*/
err = -EEXIST;
- if (__dev_get_by_name(net, dev->name)) {
+ if (netdev_name_in_use(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
@@@ -11523,7 -11506,7 +11530,7 @@@ static void __net_exit default_device_e
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
- if (__dev_get_by_name(&init_net, fb_name))
+ if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
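
netdev_name_in_use(), added and exported above, is the existence check that replaces open-coded __dev_get_by_name() != NULL tests. A sketch of caller-side use when hunting for a free name suffix — example_alloc_suffix() is illustrative only; callers hold RTNL so the answer stays stable:

#include <linux/netdevice.h>

static int example_alloc_suffix(struct net *net)
{
        char buf[IFNAMSIZ];
        int i;

        for (i = 0; i < 100; i++) {
                snprintf(buf, IFNAMSIZ, "example%d", i);
                if (!netdev_name_in_use(net, buf))
                        return i;       /* this suffix is free */
        }
        return -ENFILE;
}
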
diff --combined net/core/net-sysfs.c
index d6e4e0b43beb,b2e49eb7001d..9c01c642cf9e
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@@ -175,14 -175,6 +175,14 @@@ static int change_carrier(struct net_de
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_carrier; this helps returning early
+ * without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_carrier)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_carrier);
}
@@@ -204,12 -196,6 +204,12 @@@ static ssize_t speed_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -230,12 -216,6 +230,12 @@@ static ssize_t duplex_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -488,14 -468,6 +488,14 @@@ static ssize_t proto_down_store(struct
struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_proto_down; this helps returning
+ * early without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@@ -506,12 -478,6 +506,12 @@@ static ssize_t phys_port_id_show(struc
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this helps returning
+ * early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -534,13 -500,6 +534,13 @@@ static ssize_t phys_port_name_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -563,14 -522,6 +563,14 @@@ static ssize_t phys_switch_id_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below. This works
+ * because recurse is false when calling dev_get_port_parent_id.
+ */
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -1275,12 -1226,6 +1275,12 @@@ static ssize_t tx_maxrate_store(struct
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ /* The check is also done later; this helps returning early without
+ * hitting the trylock/restart below.
+ */
+ if (!dev->netdev_ops->ndo_set_tx_maxrate)
+ return -EOPNOTSUPP;
+
err = kstrtou32(buf, 10, &rate);
if (err < 0)
return err;
@@@ -1924,7 -1869,7 +1924,7 @@@ static struct class net_class __ro_afte
.get_ownership = net_get_ownership,
};
-#ifdef CONFIG_OF_NET
+#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
for (; dev; dev = dev->parent) {
@@@ -2028,9 -1973,9 +2028,9 @@@ int netdev_register_kobject(struct net_
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
const struct net *net_new)
{
+ kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
+ kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
struct device *dev = &ndev->dev;
- kuid_t old_uid, new_uid;
- kgid_t old_gid, new_gid;
int error;
net_ns_get_ownership(net_old, &old_uid, &old_gid);
diff --combined net/core/skbuff.c
index 74601bbc56ac,fe9358437380..09b8cf8ab234
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -80,6 -80,7 +80,7 @@@
#include <linux/indirect_call_wrapper.h>
#include "datagram.h"
+ #include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@@ -134,31 -135,34 +135,31 @@@ struct napi_alloc_cache
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
-static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
-}
-
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
fragsz = SKB_DATA_ALIGN(fragsz);
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
- struct page_frag_cache *nc;
void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
- nc = this_cpu_ptr(&netdev_alloc_cache);
+ struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
} else {
+ struct napi_alloc_cache *nc;
+
local_bh_disable();
- data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ nc = this_cpu_ptr(&napi_alloc_cache);
+ data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
local_bh_enable();
}
return data;
@@@ -394,9 -398,8 +395,9 @@@ struct sk_buff *__alloc_skb(unsigned in
{
struct kmem_cache *cache;
struct sk_buff *skb;
- u8 *data;
+ unsigned int osize;
bool pfmemalloc;
+ u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
@@@ -428,8 -431,7 +429,8 @@@
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
- size = SKB_WITH_OVERHEAD(ksize(data));
+ osize = ksize(data);
+ size = SKB_WITH_OVERHEAD(osize);
prefetchw(data + size);
/*
@@@ -438,7 -440,7 +439,7 @@@
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- __build_skb_around(skb, data, 0);
+ __build_skb_around(skb, data, osize);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) {
@@@ -1803,30 -1805,39 +1804,39 @@@ EXPORT_SYMBOL(skb_realloc_headroom)
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
int delta = headroom - skb_headroom(skb);
+ int osize = skb_end_offset(skb);
+ struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0,
"%s is expecting an increase in the headroom", __func__))
return skb;
- /* pskb_expand_head() might crash, if skb is shared */
- if (skb_shared(skb)) {
+ delta = SKB_DATA_ALIGN(delta);
+ /* pskb_expand_head() might crash, if skb is shared. */
+ if (skb_shared(skb) || !is_skb_wmem(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb)) {
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
- consume_skb(skb);
- } else {
- kfree_skb(skb);
- }
+ if (unlikely(!nskb))
+ goto fail;
+
+ if (sk)
+ skb_set_owner_w(nskb, sk);
+ consume_skb(skb);
skb = nskb;
}
- if (skb &&
- pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(skb);
- skb = NULL;
+ if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
+ goto fail;
+
+ if (sk && is_skb_wmem(skb)) {
+ delta = skb_end_offset(skb) - osize;
+ refcount_add(delta, &sk->sk_wmem_alloc);
+ skb->truesize += delta;
}
return skb;
+
+ fail:
+ kfree_skb(skb);
+ return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
diff --combined net/ipv4/tcp.c
index 7a7b9aa8f19a,f5c336f8b0c8..a7b1138d619c
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@@ -287,8 -287,8 +287,8 @@@ enum
TCP_CMSG_TS = 2
};
-struct percpu_counter tcp_orphan_count;
-EXPORT_SYMBOL_GPL(tcp_orphan_count);
+DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
+EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
@@@ -325,6 -325,11 +325,6 @@@ struct tcp_splice_state
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
-DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
-EXPORT_SYMBOL(tcp_rx_skb_cache_key);
-
-DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-
void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
@@@ -481,10 -486,7 +481,7 @@@ static bool tcp_stream_is_readable(stru
{
if (tcp_epollin_ready(sk, target))
return true;
-
- if (sk->sk_prot->stream_memory_read)
- return sk->sk_prot->stream_memory_read(sk);
- return false;
+ return sk_is_readable(sk);
}
/*
@@@ -642,7 -644,7 +639,7 @@@ int tcp_ioctl(struct sock *sk, int cmd
}
EXPORT_SYMBOL(tcp_ioctl);
-static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
+void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
@@@ -653,13 -655,15 +650,13 @@@ static inline bool forced_push(const st
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
-static void skb_entail(struct sock *sk, struct sk_buff *skb)
+void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
- skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
tcb->tcp_flags = TCPHDR_ACK;
- tcb->sacked = 0;
__skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk_wmem_queued_add(sk, skb->truesize);
@@@ -854,15 -858,30 +851,15 @@@ ssize_t tcp_splice_read(struct socket *
}
EXPORT_SYMBOL(tcp_splice_read);
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule)
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule)
{
struct sk_buff *skb;
- if (likely(!size)) {
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- sk->sk_tx_skb_cache = NULL;
- pskb_trim(skb, 0);
- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
- skb_shinfo(skb)->tx_flags = 0;
- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
- return skb;
- }
- }
- /* The TCP header must be at least 32-bit aligned. */
- size = ALIGN(size, 4);
-
if (unlikely(tcp_under_memory_pressure(sk)))
sk_mem_reclaim_partial(sk);
- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+ skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
if (likely(skb)) {
bool mem_scheduled;
@@@ -873,8 -892,12 +870,8 @@@
mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
}
if (likely(mem_scheduled)) {
- skb_reserve(skb, sk->sk_prot->max_header);
- /*
- * Make sure that we have exactly size bytes
- * available to the caller, no more, no less.
- */
- skb->reserved_tailroom = skb->end - skb->tail - size;
+ skb_reserve(skb, MAX_TCP_HEADER);
+ skb->ip_summed = CHECKSUM_PARTIAL;
INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
return skb;
}
@@@ -927,11 -950,9 +924,11 @@@ int tcp_send_mss(struct sock *sk, int *
* importantly be able to generate EPOLLOUT for Edge Trigger epoll()
* users.
*/
-void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+void tcp_remove_empty_skb(struct sock *sk)
{
- if (skb && !skb->len) {
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
+
+ if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
tcp_unlink_write_queue(skb, sk);
if (tcp_write_queue_empty(sk))
tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
@@@ -939,8 -960,8 +936,8 @@@
}
}
-struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size)
+static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ struct page *page, int offset, size_t *size)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@@ -953,15 -974,15 +950,15 @@@ new_segment
if (!sk_stream_memory_free(sk))
return NULL;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- tcp_rtx_and_write_queues_empty(sk));
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ tcp_rtx_and_write_queues_empty(sk));
if (!skb)
return NULL;
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
}
@@@ -992,6 -1013,7 +989,6 @@@
skb->truesize += copy;
sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
- skb->ip_summed = CHECKSUM_PARTIAL;
WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
TCP_SKB_CB(skb)->end_seq += copy;
tcp_skb_pcount_set(skb, 0);
@@@ -1082,7 -1104,7 +1079,7 @@@ out
return copied;
do_error:
- tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
+ tcp_remove_empty_skb(sk);
if (copied)
goto out;
out_err:
@@@ -1281,14 -1303,15 +1278,14 @@@ new_segment
goto restart;
}
first_skb = tcp_rtx_and_write_queues_empty(sk);
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- first_skb);
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ first_skb);
if (!skb)
goto wait_for_space;
process_backlog++;
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
/* All packets are restored as if they have
@@@ -1303,7 -1326,14 +1300,7 @@@
if (copy > msg_data_left(msg))
copy = msg_data_left(msg);
- /* Where to copy to? */
- if (skb_availroom(skb) > 0 && !zc) {
- /* We have some space in skb head. Superb! */
- copy = min_t(int, copy, skb_availroom(skb));
- err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
- if (err)
- goto do_fault;
- } else if (!zc) {
+ if (!zc) {
bool merge = true;
int i = skb_shinfo(skb)->nr_frags;
struct page_frag *pfrag = sk_page_frag(sk);
@@@ -1402,7 -1432,9 +1399,7 @@@ out_nopush
return copied + copied_syn;
do_error:
- skb = tcp_write_queue_tail(sk);
-do_fault:
- tcp_remove_empty_skb(sk, skb);
+ tcp_remove_empty_skb(sk);
if (copied + copied_syn)
goto out;
@@@ -2655,36 -2687,11 +2652,36 @@@ void tcp_shutdown(struct sock *sk, int
}
EXPORT_SYMBOL(tcp_shutdown);
+int tcp_orphan_count_sum(void)
+{
+ int i, total = 0;
+
+ for_each_possible_cpu(i)
+ total += per_cpu(tcp_orphan_count, i);
+
+ return max(total, 0);
+}
+
+static int tcp_orphan_cache;
+static struct timer_list tcp_orphan_timer;
+#define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
+
+static void tcp_orphan_update(struct timer_list *unused)
+{
+ WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+}
+
+static bool tcp_too_many_orphans(int shift)
+{
+ return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
+}
+
bool tcp_check_oom(struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift);
+ too_many_orphans = tcp_too_many_orphans(shift);
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans)
@@@ -2793,7 -2800,7 +2790,7 @@@ adjudge_to_death
/* remove backlog if any, without releasing ownership. */
__release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(tcp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@@ -2910,6 -2917,11 +2907,6 @@@ void tcp_write_queue_purge(struct sock
sk_wmem_free_skb(sk, skb);
}
tcp_rtx_queue_purge(sk);
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- __kfree_skb(skb);
- sk->sk_tx_skb_cache = NULL;
- }
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
@@@ -2946,6 -2958,10 +2943,6 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- if (sk->sk_rx_skb_cache) {
- __kfree_skb(sk->sk_rx_skb_cache);
- sk->sk_rx_skb_cache = NULL;
- }
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
@@@ -4486,10 -4502,7 +4483,10 @@@ void __init tcp_init(void
sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
- percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
+
+ timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+
inet_hashinfo_init(&tcp_hashinfo);
inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
thash_entries, 21, /* one slot per 2 MB*/
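
tcp_orphan_count becomes a plain per-CPU counter: writers touch only their local slot, and a deferrable timer folds the slots into tcp_orphan_cache every 100 ms so tcp_check_oom() never has to sum on a hot path. The underlying pattern, sketched with a hypothetical example_count:

#include <linux/cpumask.h>
#include <linux/minmax.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, example_count);

static void example_inc(void)
{
        this_cpu_inc(example_count);    /* no shared cacheline traffic */
}

static int example_sum(void)
{
        int cpu, total = 0;

        for_each_possible_cpu(cpu)
                total += per_cpu(example_count, cpu);

        /* inc/dec pairs can land on different CPUs, so a single slot
         * may wrap; only the clamped sum is meaningful.
         */
        return max(total, 0);
}
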
diff --combined net/mac80211/mesh.c
index a4212a333d61,5dcfd53a4ab6..15ac08d111ea
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@@ -672,7 -672,7 +672,7 @@@ ieee80211_mesh_update_bss_params(struc
u8 *ie, u8 ie_len)
{
struct ieee80211_supported_band *sband;
- const u8 *cap;
+ const struct element *cap;
const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata);
@@@ -687,9 -687,10 +687,10 @@@
sdata->vif.bss_conf.he_support = true;
- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
- if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
- he_oper = (void *)(cap + 3);
+ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+ if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
+ cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
+ he_oper = (void *)(cap->data + 1);
if (he_oper)
sdata->vif.bss_conf.he_oper.params =
@@@ -1246,7 -1247,7 +1247,7 @@@ ieee80211_mesh_rx_probe_req(struct ieee
struct sk_buff *presp;
struct beacon_data *bcn;
struct ieee80211_mgmt *hdr;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
size_t baselen;
u8 *pos;
@@@ -1255,24 -1256,22 +1256,24 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid,
- NULL);
-
- if (!elems.mesh_id)
+ elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid,
+ NULL);
+ if (!elems)
return;
+ if (!elems->mesh_id)
+ goto free;
+
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
- elems.ssid_len != 0)
- return;
+ elems->ssid_len != 0)
+ goto free;
- if (elems.mesh_id_len != 0 &&
- (elems.mesh_id_len != ifmsh->mesh_id_len ||
- memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
- return;
+ if (elems->mesh_id_len != 0 &&
+ (elems->mesh_id_len != ifmsh->mesh_id_len ||
+ memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
+ goto free;
rcu_read_lock();
bcn = rcu_dereference(ifmsh->beacon);
@@@ -1296,8 -1295,6 +1297,8 @@@
ieee80211_tx_skb(sdata, presp);
out:
rcu_read_unlock();
+free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@@ -1308,7 -1305,7 +1309,7 @@@
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
@@@ -1323,47 -1320,42 +1324,47 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems, mgmt->bssid, NULL);
+ elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable,
+ len - baselen,
+ false, mgmt->bssid, NULL);
+ if (!elems)
+ return;
/* ignore non-mesh or secure / unsecure mismatch */
- if ((!elems.mesh_id || !elems.mesh_config) ||
- (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
- (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
- return;
+ if ((!elems->mesh_id || !elems->mesh_config) ||
+ (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
+ (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
+ goto free;
- if (elems.ds_params)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
+ if (elems->ds_params)
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0], band);
else
freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return;
+ goto free;
- if (mesh_matches_local(sdata, &elems)) {
+ if (mesh_matches_local(sdata, elems)) {
mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
if (!sdata->u.mesh.user_mpm ||
sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
- mesh_neighbour_update(sdata, mgmt->sa, &elems,
+ mesh_neighbour_update(sdata, mgmt->sa, elems,
rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
!sdata->vif.csa_active)
- ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+ ieee80211_mesh_process_chnswitch(sdata, elems, true);
}
if (ifmsh->sync_ops)
- ifmsh->sync_ops->rx_bcn_presp(sdata,
- stype, mgmt, &elems, rx_status);
+ ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len,
+ elems->mesh_config, rx_status);
+free:
+ kfree(elems);
}
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
@@@ -1455,7 -1447,7 +1456,7 @@@ static void mesh_rx_csa_frame(struct ie
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
u16 pre_value;
bool fwd_csa = true;
size_t baselen;
@@@ -1468,37 -1460,33 +1469,37 @@@
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
- ieee802_11_parse_elems(pos, len - baselen, true, &elems,
- mgmt->bssid, NULL);
-
- if (!mesh_matches_local(sdata, &elems))
+ elems = ieee802_11_parse_elems(pos, len - baselen, true,
+ mgmt->bssid, NULL);
+ if (!elems)
return;
- ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ if (!mesh_matches_local(sdata, elems))
+ goto free;
+
+ ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
fwd_csa = false;
- pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
+ pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value);
if (ifmsh->pre_value >= pre_value)
- return;
+ goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.csa_active &&
- !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+ !ieee80211_mesh_process_chnswitch(sdata, elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
- return;
+ goto free;
}
/* forward or re-broadcast the CSA frame */
if (fwd_csa) {
- if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
+ if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0)
mcsa_dbg(sdata, "Failed to forward the CSA frame");
}
+free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
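For context, the mac80211 mesh hunks above all follow one conversion: ieee802_11_parse_elems() no longer fills a caller-provided struct but returns a heap-allocated struct ieee802_11_elems (or NULL), so each early return becomes a goto to a single kfree() label. Below is a minimal user-space sketch of that ownership pattern, assuming nothing beyond it: malloc()/free() stand in for kzalloc()/kfree(), and the names and element ID handling are illustrative, not the mac80211 API.

#include <stdio.h>
#include <stdlib.h>

/* illustrative stand-in for struct ieee802_11_elems */
struct elems {
	const unsigned char *mesh_id;
	size_t mesh_id_len;
};

/* parser now owns the allocation; callers must check for NULL
 * and free the result on every exit path
 */
static struct elems *parse_elems(const unsigned char *buf, size_t len)
{
	struct elems *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	/* 114 is the 802.11 Mesh ID element ID */
	if (len >= 2 && buf[1] <= len - 2 && buf[0] == 114) {
		e->mesh_id = buf + 2;
		e->mesh_id_len = buf[1];
	}
	return e;
}

static void handle_frame(const unsigned char *buf, size_t len)
{
	struct elems *elems = parse_elems(buf, len);

	if (!elems)
		return;
	if (!elems->mesh_id)
		goto free;	/* single cleanup label, as in the patch */
	printf("mesh id of %zu bytes\n", elems->mesh_id_len);
free:
	free(elems);
}

int main(void)
{
	unsigned char frame[] = { 114, 4, 'm', 'e', 's', 'h' };

	handle_frame(frame, sizeof(frame));
	return 0;
}
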
diff --combined net/mptcp/options.c
index 422f4acfb3e6,f0f22eb4fd5f..7c3420afb1a0
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@@ -485,11 -485,11 +485,11 @@@ static bool mptcp_established_options_m
mpext = mptcp_get_ext(skb);
data_len = mpext ? mpext->data_len : 0;
- /* we will check ext_copy.data_len in mptcp_write_options() to
+ /* we will check opts->data_len in mptcp_write_options() to
* discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
* TCPOLEN_MPTCP_MPC_ACK
*/
- opts->ext_copy.data_len = data_len;
+ opts->data_len = data_len;
opts->suboptions = OPTION_MPTCP_MPC_ACK;
opts->sndr_key = subflow->local_key;
opts->rcvr_key = subflow->remote_key;
@@@ -505,9 -505,9 +505,9 @@@
len = TCPOLEN_MPTCP_MPC_ACK_DATA;
if (opts->csum_reqd) {
/* we need to propagate more info to csum the pseudo hdr */
- opts->ext_copy.data_seq = mpext->data_seq;
- opts->ext_copy.subflow_seq = mpext->subflow_seq;
- opts->ext_copy.csum = mpext->csum;
+ opts->data_seq = mpext->data_seq;
+ opts->subflow_seq = mpext->subflow_seq;
+ opts->csum = mpext->csum;
len += TCPOLEN_MPTCP_DSS_CHECKSUM;
}
*size = ALIGN(len, 4);
@@@ -748,7 -748,9 +748,7 @@@ static bool mptcp_established_options_m
/* can't send MP_PRIO with MPC, as they share the same option space:
* 'backup'. Also it makes no sense at all
*/
- if (!subflow->send_mp_prio ||
- ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
- OPTION_MPTCP_MPC_ACK) & opts->suboptions))
+ if (!subflow->send_mp_prio || (opts->suboptions & OPTIONS_MPTCP_MPC))
return false;
/* account for the trailing 'nop' option */
@@@ -1017,9 -1019,11 +1017,9 @@@ static void ack_update_msk(struct mptcp
old_snd_una = msk->snd_una;
new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
- /* ACK for data not even sent yet and even above recovery bound? Ignore.*/
- if (unlikely(after64(new_snd_una, snd_nxt))) {
- if (!msk->recovery || after64(new_snd_una, msk->recovery_snd_nxt))
- new_snd_una = old_snd_una;
- }
+ /* ACK for data not even sent yet? Ignore. */
+ if (unlikely(after64(new_snd_una, snd_nxt)))
+ new_snd_una = old_snd_una;
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
@@@ -1223,7 -1227,7 +1223,7 @@@ static void mptcp_set_rwin(const struc
WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
}
- static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+ static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum16 sum)
{
struct csum_pseudo_header header;
__wsum csum;
@@@ -1233,15 -1237,21 +1233,21 @@@
* always the 64-bit value, irrespective of what length is used in the
* DSS option itself.
*/
- header.data_seq = cpu_to_be64(mpext->data_seq);
- header.subflow_seq = htonl(mpext->subflow_seq);
- header.data_len = htons(mpext->data_len);
+ header.data_seq = cpu_to_be64(data_seq);
+ header.subflow_seq = htonl(subflow_seq);
+ header.data_len = htons(data_len);
header.csum = 0;
- csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum));
+ csum = csum_partial(&header, sizeof(header), ~csum_unfold(sum));
return (__force u16)csum_fold(csum);
}
+ static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+ {
+ return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ mpext->csum);
+ }
+
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
struct mptcp_out_options *opts)
{
@@@ -1325,14 -1335,15 +1331,14 @@@
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
}
}
- } else if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
- OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
+ } else if (OPTIONS_MPTCP_MPC & opts->suboptions) {
u8 len, flag = MPTCP_CAP_HMAC_SHA256;
if (OPTION_MPTCP_MPC_SYN & opts->suboptions) {
len = TCPOLEN_MPTCP_MPC_SYN;
} else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
len = TCPOLEN_MPTCP_MPC_SYNACK;
- } else if (opts->ext_copy.data_len) {
+ } else if (opts->data_len) {
len = TCPOLEN_MPTCP_MPC_ACK_DATA;
if (opts->csum_reqd)
len += TCPOLEN_MPTCP_DSS_CHECKSUM;
@@@ -1361,14 -1372,17 +1367,17 @@@
put_unaligned_be64(opts->rcvr_key, ptr);
ptr += 2;
- if (!opts->ext_copy.data_len)
+ if (!opts->data_len)
goto mp_capable_done;
if (opts->csum_reqd) {
- put_unaligned_be32(opts->ext_copy.data_len << 16 |
- mptcp_make_csum(&opts->ext_copy), ptr);
+ put_unaligned_be32(opts->data_len << 16 |
+ __mptcp_make_csum(opts->data_seq,
+ opts->subflow_seq,
+ opts->data_len,
+ opts->csum), ptr);
} else {
- put_unaligned_be32(opts->ext_copy.data_len << 16 |
+ put_unaligned_be32(opts->data_len << 16 |
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
}
ptr += 1;
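The mptcp hunks above move the checksum fields out of the embedded mptcp_ext copy into struct mptcp_out_options itself, and split __mptcp_make_csum() out so the DSS checksum can be computed from raw fields. The sketch below models that pseudo-header fold in user space under stated assumptions: the field layout mirrors the in-tree csum_pseudo_header, but the kernel uses csum_partial()/csum_fold(), and the seed value and helper names here are illustrative only.

#include <arpa/inet.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct dss_pseudo_header {
	uint64_t data_seq;	/* always the 64-bit DSN, big endian */
	uint32_t subflow_seq;
	uint16_t data_len;
	uint16_t csum;		/* zero while the sum is computed */
} __attribute__((packed));

/* one's-complement sum over big-endian 16-bit words, then fold */
static uint16_t csum16(const void *buf, size_t len, uint32_t sum)
{
	const uint8_t *p = buf;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;	/* final complement, as csum_fold() does */
}

int main(void)
{
	struct dss_pseudo_header h = {
		.data_seq    = htobe64(0x0123456789abcdefULL),
		.subflow_seq = htonl(1),
		.data_len    = htons(1400),
		.csum        = 0,
	};
	uint16_t data_csum = 0xbeef;	/* illustrative seed carried in the option */

	printf("csum: 0x%04x\n",
	       csum16(&h, sizeof(h), (uint16_t)~data_csum));
	return 0;
}
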
diff --combined net/smc/af_smc.c
index 5e50e007a7da,78b663dbfa1f..8dc34388b2c1
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@@ -439,47 -439,6 +439,47 @@@ static int smcr_clnt_conf_first_link(st
return 0;
}
+static bool smc_isascii(char *hostname)
+{
+ int i;
+
+ for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
+ if (!isascii(hostname[i]))
+ return false;
+ return true;
+}
+
+static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *clc)
+{
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)clc;
+ struct smc_clc_first_contact_ext *fce;
+ int clc_v2_len;
+
+ if (clc->hdr.version == SMC_V1 ||
+ !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
+ return;
+
+ if (smc->conn.lgr->is_smcd) {
+ memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid,
+ SMC_MAX_EID_LEN);
+ clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
+ d1);
+ } else {
+ memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid,
+ SMC_MAX_EID_LEN);
+ clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
+ r1);
+ }
+ fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len);
+ smc->conn.lgr->peer_os = fce->os_type;
+ smc->conn.lgr->peer_smc_release = fce->release;
+ if (smc_isascii(fce->hostname))
+ memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
+ SMC_MAX_HOSTNAME_LEN);
+}
+
static void smcr_conn_save_peer_info(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *clc)
{
@@@ -492,6 -451,16 +492,6 @@@
smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
-static bool smc_isascii(char *hostname)
-{
- int i;
-
- for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
- if (!isascii(hostname[i]))
- return false;
- return true;
-}
-
static void smcd_conn_save_peer_info(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *clc)
{
@@@ -503,6 -472,22 +503,6 @@@
smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
- if (clc->hdr.version > SMC_V1 &&
- (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
- struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
- (struct smc_clc_msg_accept_confirm_v2 *)clc;
- struct smc_clc_first_contact_ext *fce =
- (struct smc_clc_first_contact_ext *)
- (((u8 *)clc_v2) + sizeof(*clc_v2));
-
- memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
- SMC_MAX_EID_LEN);
- smc->conn.lgr->peer_os = fce->os_type;
- smc->conn.lgr->peer_smc_release = fce->release;
- if (smc_isascii(fce->hostname))
- memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
- SMC_MAX_HOSTNAME_LEN);
- }
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
@@@ -512,16 -497,14 +512,16 @@@
smcd_conn_save_peer_info(smc, clc);
else
smcr_conn_save_peer_info(smc, clc);
+ smc_conn_save_peer_info_fce(smc, clc);
}
static void smc_link_save_peer_info(struct smc_link *link,
- struct smc_clc_msg_accept_confirm *clc)
+ struct smc_clc_msg_accept_confirm *clc,
+ struct smc_init_info *ini)
{
link->peer_qpn = ntoh24(clc->r0.qpn);
- memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
- memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
+ memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
+ memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
link->peer_psn = ntoh24(clc->r0.psn);
link->peer_mtu = clc->r0.qp_mtu;
}
@@@ -625,9 -608,7 +625,9 @@@ static int smc_find_rdma_device(struct
* used for the internal TCP socket
*/
smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
- if (!ini->ib_dev)
+ if (!ini->check_smcrv2 && !ini->ib_dev)
+ return SMC_CLC_DECL_NOSMCRDEV;
+ if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
return SMC_CLC_DECL_NOSMCRDEV;
return 0;
}
@@@ -711,42 -692,27 +711,42 @@@ static int smc_find_proposal_devices(st
int rc = 0;
/* check if there is an ism device available */
- if (ini->smcd_version & SMC_V1) {
- if (smc_find_ism_device(smc, ini) ||
- smc_connect_ism_vlan_setup(smc, ini)) {
- if (ini->smc_type_v1 == SMC_TYPE_B)
- ini->smc_type_v1 = SMC_TYPE_R;
- else
- ini->smc_type_v1 = SMC_TYPE_N;
- } /* else ISM V1 is supported for this connection */
- if (smc_find_rdma_device(smc, ini)) {
- if (ini->smc_type_v1 == SMC_TYPE_B)
- ini->smc_type_v1 = SMC_TYPE_D;
- else
- ini->smc_type_v1 = SMC_TYPE_N;
- } /* else RDMA is supported for this connection */
- }
- if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
- ini->smc_type_v2 = SMC_TYPE_N;
+ if (!(ini->smcd_version & SMC_V1) ||
+ smc_find_ism_device(smc, ini) ||
+ smc_connect_ism_vlan_setup(smc, ini))
+ ini->smcd_version &= ~SMC_V1;
+ /* else ISM V1 is supported for this connection */
+
+ /* check if there is an rdma device available */
+ if (!(ini->smcr_version & SMC_V1) ||
+ smc_find_rdma_device(smc, ini))
+ ini->smcr_version &= ~SMC_V1;
+ /* else RDMA is supported for this connection */
+
+ ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
+ ini->smcr_version & SMC_V1);
+
+ /* check if there is an ism v2 device available */
+ if (!(ini->smcd_version & SMC_V2) ||
+ !smc_ism_is_v2_capable() ||
+ smc_find_ism_v2_device_clnt(smc, ini))
+ ini->smcd_version &= ~SMC_V2;
+
+ /* check if there is an rdma v2 device available */
+ ini->check_smcrv2 = true;
+ ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
+ if (!(ini->smcr_version & SMC_V2) ||
+ smc->clcsock->sk->sk_family != AF_INET ||
+ !smc_clc_ueid_count() ||
+ smc_find_rdma_device(smc, ini))
+ ini->smcr_version &= ~SMC_V2;
+ ini->check_smcrv2 = false;
+
+ ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
+ ini->smcr_version & SMC_V2);
/* if neither ISM nor RDMA are supported, fallback */
- if (!smcr_indicated(ini->smc_type_v1) &&
- ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
+ if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
rc = SMC_CLC_DECL_NOSMCDEV;
return rc;
@@@ -786,64 -752,6 +786,64 @@@ static int smc_connect_clc(struct smc_s
SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
+void smc_fill_gid_list(struct smc_link_group *lgr,
+ struct smc_gidlist *gidlist,
+ struct smc_ib_device *known_dev, u8 *known_gid)
+{
+ struct smc_init_info *alt_ini = NULL;
+
+ memset(gidlist, 0, sizeof(*gidlist));
+ memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);
+
+ alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
+ if (!alt_ini)
+ goto out;
+
+ alt_ini->vlan_id = lgr->vlan_id;
+ alt_ini->check_smcrv2 = true;
+ alt_ini->smcrv2.saddr = lgr->saddr;
+ smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);
+
+ if (!alt_ini->smcrv2.ib_dev_v2)
+ goto out;
+
+ memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
+ SMC_GID_SIZE);
+
+out:
+ kfree(alt_ini);
+}
+
+static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *aclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+ struct smc_clc_first_contact_ext *fce =
+ (struct smc_clc_first_contact_ext *)
+ (((u8 *)clc_v2) + sizeof(*clc_v2));
+
+ if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
+ return 0;
+
+ if (fce->v2_direct) {
+ memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
+ ini->smcrv2.uses_gateway = false;
+ } else {
+ if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+ smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
+ ini->smcrv2.nexthop_mac,
+ &ini->smcrv2.uses_gateway))
+ return SMC_CLC_DECL_NOROUTE;
+ if (!ini->smcrv2.uses_gateway) {
+ /* mismatch: peer claims indirect, but it's direct */
+ return SMC_CLC_DECL_NOINDIRECT;
+ }
+ }
+ return 0;
+}
+
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *aclc,
@@@ -851,18 -759,11 +851,18 @@@
{
int i, reason_code = 0;
struct smc_link *link;
+ u8 *eid = NULL;
ini->is_smcd = false;
- ini->ib_lcl = &aclc->r0.lcl;
ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
+ memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
+ memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
+ memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
+
+ reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
+ if (reason_code)
+ return reason_code;
mutex_lock(&smc_client_lgr_pending);
reason_code = smc_conn_create(smc, ini);
@@@ -884,9 -785,8 +884,9 @@@
if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
!memcmp(l->peer_gid, &aclc->r0.lcl.gid,
SMC_GID_SIZE) &&
- !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
- sizeof(l->peer_mac))) {
+ (aclc->hdr.version > SMC_V1 ||
+ !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
+ sizeof(l->peer_mac)))) {
link = l;
break;
}
@@@ -905,7 -805,7 +905,7 @@@
}
if (ini->first_contact_local)
- smc_link_save_peer_info(link, aclc);
+ smc_link_save_peer_info(link, aclc, ini);
if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
reason_code = SMC_CLC_DECL_ERR_RTOK;
@@@ -928,18 -828,8 +928,18 @@@
}
smc_rmb_sync_sg_for_device(&smc->conn);
+ if (aclc->hdr.version > SMC_V1) {
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+
+ eid = clc_v2->r1.eid;
+ if (ini->first_contact_local)
+ smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
+ link->smcibdev, link->gid);
+ }
+
reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
- SMC_V1);
+ aclc->hdr.version, eid, ini);
if (reason_code)
goto connect_abort;
@@@ -979,7 -869,7 +979,7 @@@ smc_v2_determine_accepted_chid(struct s
int i;
for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
- if (ini->ism_chid[i] == ntohs(aclc->chid)) {
+ if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
ini->ism_selected = i;
return 0;
}
@@@ -993,7 -883,6 +993,7 @@@ static int smc_connect_ism(struct smc_s
struct smc_clc_msg_accept_confirm *aclc,
struct smc_init_info *ini)
{
+ u8 *eid = NULL;
int rc = 0;
ini->is_smcd = true;
@@@ -1029,15 -918,8 +1029,15 @@@
smc_rx_init(smc);
smc_tx_init(smc);
+ if (aclc->hdr.version > SMC_V1) {
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+
+ eid = clc_v2->d1.eid;
+ }
+
rc = smc_clc_send_confirm(smc, ini->first_contact_local,
- aclc->hdr.version);
+ aclc->hdr.version, eid, NULL);
if (rc)
goto connect_abort;
mutex_unlock(&smc_server_lgr_pending);
@@@ -1060,24 -942,17 +1060,24 @@@ connect_abort
static int smc_connect_check_aclc(struct smc_init_info *ini,
struct smc_clc_msg_accept_confirm *aclc)
{
- if ((aclc->hdr.typev1 == SMC_TYPE_R &&
- !smcr_indicated(ini->smc_type_v1)) ||
- (aclc->hdr.typev1 == SMC_TYPE_D &&
- ((!smcd_indicated(ini->smc_type_v1) &&
- !smcd_indicated(ini->smc_type_v2)) ||
- (aclc->hdr.version == SMC_V1 &&
- !smcd_indicated(ini->smc_type_v1)) ||
- (aclc->hdr.version == SMC_V2 &&
- !smcd_indicated(ini->smc_type_v2)))))
+ if (aclc->hdr.typev1 != SMC_TYPE_R &&
+ aclc->hdr.typev1 != SMC_TYPE_D)
return SMC_CLC_DECL_MODEUNSUPP;
+ if (aclc->hdr.version >= SMC_V2) {
+ if ((aclc->hdr.typev1 == SMC_TYPE_R &&
+ !smcr_indicated(ini->smc_type_v2)) ||
+ (aclc->hdr.typev1 == SMC_TYPE_D &&
+ !smcd_indicated(ini->smc_type_v2)))
+ return SMC_CLC_DECL_MODEUNSUPP;
+ } else {
+ if ((aclc->hdr.typev1 == SMC_TYPE_R &&
+ !smcr_indicated(ini->smc_type_v1)) ||
+ (aclc->hdr.typev1 == SMC_TYPE_D &&
+ !smcd_indicated(ini->smc_type_v1)))
+ return SMC_CLC_DECL_MODEUNSUPP;
+ }
+
return 0;
}
@@@ -1108,15 -983,14 +1108,15 @@@ static int __smc_connect(struct smc_soc
return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
version);
- ini->smcd_version = SMC_V1;
- ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
+ ini->smcd_version = SMC_V1 | SMC_V2;
+ ini->smcr_version = SMC_V1 | SMC_V2;
ini->smc_type_v1 = SMC_TYPE_B;
- ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;
+ ini->smc_type_v2 = SMC_TYPE_B;
/* get vlan id from IP device */
if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
ini->smcd_version &= ~SMC_V1;
+ ini->smcr_version = 0;
ini->smc_type_v1 = SMC_TYPE_N;
if (!ini->smcd_version) {
rc = SMC_CLC_DECL_GETVLANERR;
@@@ -1144,17 -1018,15 +1144,17 @@@
/* check if smc modes and versions of CLC proposal and accept match */
rc = smc_connect_check_aclc(ini, aclc);
version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
- ini->smcd_version = version;
if (rc)
goto vlan_cleanup;
/* depending on previous steps, connect using rdma or ism */
- if (aclc->hdr.typev1 == SMC_TYPE_R)
+ if (aclc->hdr.typev1 == SMC_TYPE_R) {
+ ini->smcr_version = version;
rc = smc_connect_rdma(smc, aclc, ini);
- else if (aclc->hdr.typev1 == SMC_TYPE_D)
+ } else if (aclc->hdr.typev1 == SMC_TYPE_D) {
+ ini->smcd_version = version;
rc = smc_connect_ism(smc, aclc, ini);
+ }
if (rc)
goto vlan_cleanup;
@@@ -1185,7 -1057,7 +1185,7 @@@ static void smc_connect_work(struct wor
if (smc->clcsock->sk->sk_err) {
smc->sk.sk_err = smc->clcsock->sk->sk_err;
} else if ((1 << smc->clcsock->sk->sk_state) &
- (TCPF_SYN_SENT | TCP_SYN_RECV)) {
+ (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
if ((rc == -EPIPE) &&
((1 << smc->clcsock->sk->sk_state) &
@@@ -1435,7 -1307,7 +1435,7 @@@ static int smcr_serv_conf_first_link(st
smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
/* initial contact - try to establish second link */
- smc_llc_srv_add_link(link);
+ smc_llc_srv_add_link(link, NULL);
return 0;
}
@@@ -1515,48 -1387,33 +1515,48 @@@ static int smc_listen_v2_check(struct s
ini->smc_type_v1 = pclc->hdr.typev1;
ini->smc_type_v2 = pclc->hdr.typev2;
- ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
- if (pclc->hdr.version > SMC_V1)
- ini->smcd_version |=
- ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
- if (!(ini->smcd_version & SMC_V2)) {
+ ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
+ ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
+ if (pclc->hdr.version > SMC_V1) {
+ if (smcd_indicated(ini->smc_type_v2))
+ ini->smcd_version |= SMC_V2;
+ if (smcr_indicated(ini->smc_type_v2))
+ ini->smcr_version |= SMC_V2;
+ }
+ if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
rc = SMC_CLC_DECL_PEERNOSMC;
goto out;
}
- if (!smc_ism_is_v2_capable()) {
- ini->smcd_version &= ~SMC_V2;
- rc = SMC_CLC_DECL_NOISM2SUPP;
- goto out;
- }
pclc_v2_ext = smc_get_clc_v2_ext(pclc);
if (!pclc_v2_ext) {
ini->smcd_version &= ~SMC_V2;
+ ini->smcr_version &= ~SMC_V2;
rc = SMC_CLC_DECL_NOV2EXT;
goto out;
}
pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
- if (!pclc_smcd_v2_ext) {
- ini->smcd_version &= ~SMC_V2;
- rc = SMC_CLC_DECL_NOV2DEXT;
+ if (ini->smcd_version & SMC_V2) {
+ if (!smc_ism_is_v2_capable()) {
+ ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOISM2SUPP;
+ } else if (!pclc_smcd_v2_ext) {
+ ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOV2DEXT;
+ } else if (!pclc_v2_ext->hdr.eid_cnt &&
+ !pclc_v2_ext->hdr.flag.seid) {
+ ini->smcd_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOUEID;
+ }
+ }
+ if (ini->smcr_version & SMC_V2) {
+ if (!pclc_v2_ext->hdr.eid_cnt) {
+ ini->smcr_version &= ~SMC_V2;
+ rc = SMC_CLC_DECL_NOUEID;
+ }
}
out:
- if (!ini->smcd_version)
+ if (!ini->smcd_version && !ini->smcr_version)
return rc;
return 0;
@@@ -1676,6 -1533,11 +1676,6 @@@ static void smc_find_ism_v2_device_serv
pclc_smcd = smc_get_clc_msg_smcd(pclc);
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
- if (!smcd_v2_ext ||
- !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
- smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
- goto not_found;
- }
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid)
@@@ -1693,16 -1555,14 +1693,16 @@@
}
mutex_unlock(&smcd_dev_list.mutex);
- if (ini->ism_dev[0]) {
- smc_ism_get_system_eid(ini->ism_dev[0], &eid);
- if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
- goto not_found;
- } else {
+ if (!ini->ism_dev[0]) {
+ smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
goto not_found;
}
+ smc_ism_get_system_eid(&eid);
+ if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
+ smcd_v2_ext->system_eid, eid))
+ goto not_found;
+
/* separate - outside the smcd_dev_list.lock */
smcd_version = ini->smcd_version;
for (i = 0; i < matches; i++) {
@@@ -1719,7 -1579,6 +1719,7 @@@
}
/* no V2 ISM device could be initialized */
ini->smcd_version = smcd_version; /* restore original value */
+ ini->negotiated_eid[0] = 0;
not_found:
ini->smcd_version &= ~SMC_V2;
@@@ -1749,7 -1608,6 +1749,7 @@@ static void smc_find_ism_v1_device_serv
not_found:
smc_find_ism_store_rc(rc, ini);
+ ini->smcd_version &= ~SMC_V1;
ini->ism_dev[0] = NULL;
ini->is_smcd = false;
}
@@@ -1768,69 -1626,24 +1768,69 @@@ static int smc_listen_rdma_reg(struct s
return 0;
}
+static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_v2_extension *smc_v2_ext;
+ u8 smcr_version;
+ int rc;
+
+ if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
+ goto not_found;
+
+ smc_v2_ext = smc_get_clc_v2_ext(pclc);
+ if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ goto not_found;
+
+ /* prepare RDMA check */
+ memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
+ memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
+ memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
+ ini->check_smcrv2 = true;
+ ini->smcrv2.clc_sk = new_smc->clcsock->sk;
+ ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
+ ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
+ rc = smc_find_rdma_device(new_smc, ini);
+ if (rc) {
+ smc_find_ism_store_rc(rc, ini);
+ goto not_found;
+ }
+ if (!ini->smcrv2.uses_gateway)
+ memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);
+
+ smcr_version = ini->smcr_version;
+ ini->smcr_version = SMC_V2;
+ rc = smc_listen_rdma_init(new_smc, ini);
+ if (!rc)
+ rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+ if (!rc)
+ return;
+ ini->smcr_version = smcr_version;
+ smc_find_ism_store_rc(rc, ini);
+
+not_found:
+ ini->smcr_version &= ~SMC_V2;
+ ini->check_smcrv2 = false;
+}
+
static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
{
int rc;
- if (!smcr_indicated(ini->smc_type_v1))
+ if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
return SMC_CLC_DECL_NOSMCDEV;
/* prepare RDMA check */
- ini->ib_lcl = &pclc->lcl;
+ memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
+ memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
+ memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
rc = smc_find_rdma_device(new_smc, ini);
if (rc) {
/* no RDMA device found */
- if (ini->smc_type_v1 == SMC_TYPE_B)
- /* neither ISM nor RDMA device found */
- rc = SMC_CLC_DECL_NOSMCDEV;
- return rc;
+ return SMC_CLC_DECL_NOSMCDEV;
}
rc = smc_listen_rdma_init(new_smc, ini);
if (rc)
@@@ -1843,60 -1656,51 +1843,60 @@@ static int smc_listen_find_device(struc
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
{
- int rc;
+ int prfx_rc;
/* check for ISM device matching V2 proposed device */
smc_find_ism_v2_device_serv(new_smc, pclc, ini);
if (ini->ism_dev[0])
return 0;
- if (!(ini->smcd_version & SMC_V1))
- return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;
-
- /* check for matching IP prefix and subnet length */
- rc = smc_listen_prfx_check(new_smc, pclc);
- if (rc)
- return ini->rc ?: rc;
+ /* check for matching IP prefix and subnet length (V1) */
+ prfx_rc = smc_listen_prfx_check(new_smc, pclc);
+ if (prfx_rc)
+ smc_find_ism_store_rc(prfx_rc, ini);
/* get vlan id from IP device */
if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
/* check for ISM device matching V1 proposed device */
- smc_find_ism_v1_device_serv(new_smc, pclc, ini);
+ if (!prfx_rc)
+ smc_find_ism_v1_device_serv(new_smc, pclc, ini);
if (ini->ism_dev[0])
return 0;
- if (pclc->hdr.typev1 == SMC_TYPE_D)
+ if (!smcr_indicated(pclc->hdr.typev1) &&
+ !smcr_indicated(pclc->hdr.typev2))
/* skip RDMA and decline */
return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
- /* check if RDMA is available */
- rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
- smc_find_ism_store_rc(rc, ini);
+ /* check if RDMA V2 is available */
+ smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
+ if (ini->smcrv2.ib_dev_v2)
+ return 0;
- return (!rc) ? 0 : ini->rc;
+ /* check if RDMA V1 is available */
+ if (!prfx_rc) {
+ int rc;
+
+ rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
+ smc_find_ism_store_rc(rc, ini);
+ return (!rc) ? 0 : ini->rc;
+ }
+ return SMC_CLC_DECL_NOSMCDEV;
}
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
struct smc_clc_msg_accept_confirm *cclc,
- bool local_first)
+ bool local_first,
+ struct smc_init_info *ini)
{
struct smc_link *link = new_smc->conn.lnk;
int reason_code = 0;
if (local_first)
- smc_link_save_peer_info(link, cclc);
+ smc_link_save_peer_info(link, cclc, ini);
if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
return SMC_CLC_DECL_ERR_RTOK;
@@@ -1917,13 -1721,12 +1917,13 @@@ static void smc_listen_work(struct work
{
struct smc_sock *new_smc = container_of(work, struct smc_sock,
smc_listen_work);
- u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
struct socket *newclcsock = new_smc->clcsock;
struct smc_clc_msg_accept_confirm *cclc;
struct smc_clc_msg_proposal_area *buf;
struct smc_clc_msg_proposal *pclc;
struct smc_init_info *ini = NULL;
+ u8 proposal_version = SMC_V1;
+ u8 accept_version;
int rc = 0;
if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
@@@ -1954,9 -1757,7 +1954,9 @@@
SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
if (rc)
goto out_decl;
- version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;
+
+ if (pclc->hdr.version > SMC_V1)
+ proposal_version = SMC_V2;
/* IPSec connections opt out of SMC optimizations */
if (using_ipsec(new_smc)) {
@@@ -1986,9 -1787,8 +1986,9 @@@
goto out_unlock;
/* send SMC Accept CLC message */
+ accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
- ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
+ accept_version, ini->negotiated_eid);
if (rc)
goto out_unlock;
@@@ -2010,7 -1810,7 +2010,7 @@@
/* finish worker */
if (!ini->is_smcd) {
rc = smc_listen_rdma_finish(new_smc, cclc,
- ini->first_contact_local);
+ ini->first_contact_local, ini);
if (rc)
goto out_unlock;
mutex_unlock(&smc_server_lgr_pending);
@@@ -2024,7 -1824,7 +2024,7 @@@ out_unlock
mutex_unlock(&smc_server_lgr_pending);
out_decl:
smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
- version);
+ proposal_version);
out_free:
kfree(ini);
kfree(buf);
@@@ -2862,7 -2662,6 +2862,7 @@@ static void __exit smc_exit(void
proto_unregister(&smc_proto);
smc_pnet_exit();
smc_nl_exit();
+ smc_clc_exit();
unregister_pernet_subsys(&smc_net_stat_ops);
unregister_pernet_subsys(&smc_net_ops);
rcu_barrier();
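The af_smc hunks above replace the single smcd_version tracking with independent smcd_version/smcr_version bitmasks that each failed device lookup whittles down, declining to TCP only once both masks are empty. Below is a compact sketch of that negotiation flow; the device checks are stubbed out and all names besides the bitmask logic are illustrative, not the SMC code.

#include <stdbool.h>
#include <stdio.h>

#define SMC_V1 1
#define SMC_V2 2

struct init_info {
	unsigned smcd_version;	/* ISM (SMC-D) candidate versions */
	unsigned smcr_version;	/* RDMA (SMC-R) candidate versions */
};

/* stubs standing in for the real device lookups */
static bool find_ism_device(int version)  { return version == SMC_V1; }
static bool find_rdma_device(int version) { return version == SMC_V2; }

static int find_proposal_devices(struct init_info *ini)
{
	/* each failed check clears exactly one capability bit */
	if ((ini->smcd_version & SMC_V1) && !find_ism_device(SMC_V1))
		ini->smcd_version &= ~SMC_V1;
	if ((ini->smcd_version & SMC_V2) && !find_ism_device(SMC_V2))
		ini->smcd_version &= ~SMC_V2;
	if ((ini->smcr_version & SMC_V1) && !find_rdma_device(SMC_V1))
		ini->smcr_version &= ~SMC_V1;
	if ((ini->smcr_version & SMC_V2) && !find_rdma_device(SMC_V2))
		ini->smcr_version &= ~SMC_V2;

	/* fall back only when neither ISM nor RDMA survived */
	if (!ini->smcd_version && !ini->smcr_version)
		return -1;
	return 0;
}

int main(void)
{
	struct init_info ini = {
		.smcd_version = SMC_V1 | SMC_V2,
		.smcr_version = SMC_V1 | SMC_V2,
	};

	printf("rc=%d smcd=%#x smcr=%#x\n", find_proposal_devices(&ini),
	       ini.smcd_version, ini.smcr_version);
	return 0;
}
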
diff --combined net/smc/smc_llc.c
index a9623c952007,f1d323439a2a..b102680296b8
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@@ -23,24 -23,16 +23,24 @@@
struct smc_llc_hdr {
struct smc_wr_rx_hdr common;
- u8 length; /* 44 */
-#if defined(__BIG_ENDIAN_BITFIELD)
- u8 reserved:4,
- add_link_rej_rsn:4;
+ union {
+ struct {
+ u8 length; /* 44 */
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:4,
+ add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 add_link_rej_rsn:4,
- reserved:4;
+ u8 add_link_rej_rsn:4,
+ reserved:4;
#endif
+ };
+ u16 length_v2; /* 44 - 8192 */
+ };
u8 flags;
-};
+} __packed; /* format defined in
+ * IBM Shared Memory Communications Version 2
+ * (https://www.ibm.com/support/pages/node/6326337)
+ */
#define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03
@@@ -84,32 -76,6 +84,32 @@@ struct smc_llc_msg_add_link_cont_rt
__be64 rmb_vaddr_new;
};
+struct smc_llc_msg_add_link_v2_ext {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 v2_direct : 1,
+ reserved : 7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved : 7,
+ v2_direct : 1;
+#endif
+ u8 reserved2;
+ u8 client_target_gid[SMC_GID_SIZE];
+ u8 reserved3[8];
+ u16 num_rkeys;
+ struct smc_llc_msg_add_link_cont_rt rt[];
+} __packed; /* format defined in
+ * IBM Shared Memory Communications Version 2
+ * (https://www.ibm.com/support/pages/node/6326337)
+ */
+
+struct smc_llc_msg_req_add_link_v2 {
+ struct smc_llc_hdr hd;
+ u8 reserved[20];
+ u8 gid_cnt;
+ u8 reserved2[3];
+ u8 gid[][SMC_GID_SIZE];
+};
+
#define SMC_LLC_RKEYS_PER_CONT_MSG 2
struct smc_llc_msg_add_link_cont { /* type 0x03 */
@@@ -148,8 -114,7 +148,8 @@@ struct smc_rmb_rtoken
__be64 rmb_vaddr;
} __packed; /* format defined in RFC7609 */
-#define SMC_LLC_RKEYS_PER_MSG 3
+#define SMC_LLC_RKEYS_PER_MSG 3
+#define SMC_LLC_RKEYS_PER_MSG_V2 255
struct smc_llc_msg_confirm_rkey { /* type 0x06 */
struct smc_llc_hdr hd;
@@@ -170,18 -135,9 +170,18 @@@ struct smc_llc_msg_delete_rkey { /* typ
u8 reserved2[4];
};
+struct smc_llc_msg_delete_rkey_v2 { /* type 0x29 */
+ struct smc_llc_hdr hd;
+ u8 num_rkeys;
+ u8 num_inval_rkeys;
+ u8 reserved[2];
+ __be32 rkey[];
+};
+
union smc_llc_msg {
struct smc_llc_msg_confirm_link confirm_link;
struct smc_llc_msg_add_link add_link;
+ struct smc_llc_msg_req_add_link_v2 req_add_link;
struct smc_llc_msg_add_link_cont add_link_cont;
struct smc_llc_msg_del_link delete_link;
@@@ -233,7 -189,7 +233,7 @@@ static inline void smc_llc_flow_qentry_
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
struct smc_llc_qentry *qentry)
{
- u8 msg_type = qentry->msg.raw.hdr.common.type;
+ u8 msg_type = qentry->msg.raw.hdr.common.llc_type;
if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
flow_type != msg_type && !lgr->delayed_event) {
@@@ -263,7 -219,7 +263,7 @@@ static bool smc_llc_flow_start(struct s
spin_unlock_bh(&lgr->llc_flow_lock);
return false;
}
- switch (qentry->msg.raw.hdr.common.type) {
+ switch (qentry->msg.raw.hdr.common.llc_type) {
case SMC_LLC_ADD_LINK:
flow->type = SMC_LLC_FLOW_ADD_LINK;
break;
@@@ -350,7 -306,7 +350,7 @@@ struct smc_llc_qentry *smc_llc_wait(str
smc_llc_flow_qentry_del(flow);
goto out;
}
- rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+ rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
if (exp_msg && rcv_msg != exp_msg) {
if (exp_msg == SMC_LLC_ADD_LINK &&
rcv_msg == SMC_LLC_DELETE_LINK) {
@@@ -418,30 -374,6 +418,30 @@@ static int smc_llc_add_pending_send(str
return 0;
}
+static int smc_llc_add_pending_send_v2(struct smc_link *link,
+ struct smc_wr_v2_buf **wr_buf,
+ struct smc_wr_tx_pend_priv **pend)
+{
+ int rc;
+
+ rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
+ struct smc_link_group *lgr, size_t len)
+{
+ if (lgr->smc_version == SMC_V2) {
+ hdr->common.llc_version = SMC_V2;
+ hdr->length_v2 = len;
+ } else {
+ hdr->common.llc_version = 0;
+ hdr->length = len;
+ }
+}
+
/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
enum smc_llc_reqresp reqresp)
@@@ -458,8 -390,8 +458,8 @@@
goto put_out;
confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
memset(confllc, 0, sizeof(*confllc));
- confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
- confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
+ confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
+ smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
if (reqresp == SMC_LLC_RESP)
confllc->hd.flags |= SMC_LLC_FLAG_RESP;
@@@ -494,8 -426,8 +494,8 @@@ static int smc_llc_send_confirm_rkey(st
goto put_out;
rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
memset(rkeyllc, 0, sizeof(*rkeyllc));
- rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
- rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
+ rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
+ smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));
rtok_ix = 1;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
@@@ -539,8 -471,8 +539,8 @@@ static int smc_llc_send_delete_rkey(str
goto put_out;
rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
memset(rkeyllc, 0, sizeof(*rkeyllc));
- rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
- rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
+ rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
+ smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
rkeyllc->num_rkeys = 1;
rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
/* send llc message */
@@@ -550,116 -482,26 +550,116 @@@ put_out
return rc;
}
+/* return first buffer from any of the next buf lists */
+static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
+ int *buf_lst)
+{
+ struct smc_buf_desc *buf_pos;
+
+ while (*buf_lst < SMC_RMBE_SIZES) {
+ buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
+ struct smc_buf_desc, list);
+ if (buf_pos)
+ return buf_pos;
+ (*buf_lst)++;
+ }
+ return NULL;
+}
+
+/* return next rmb from buffer lists */
+static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
+ int *buf_lst,
+ struct smc_buf_desc *buf_pos)
+{
+ struct smc_buf_desc *buf_next;
+
+ if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
+ (*buf_lst)++;
+ return _smc_llc_get_next_rmb(lgr, buf_lst);
+ }
+ buf_next = list_next_entry(buf_pos, list);
+ return buf_next;
+}
+
+static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
+ int *buf_lst)
+{
+ *buf_lst = 0;
+ return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
+}
+
+static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ struct smc_link *link, struct smc_link *link_new)
+{
+ struct smc_link_group *lgr = link->lgr;
+ struct smc_buf_desc *buf_pos;
+ int prim_lnk_idx, lnk_idx, i;
+ struct smc_buf_desc *rmb;
+ int len = sizeof(*ext);
+ int buf_lst;
+
+ ext->v2_direct = !lgr->uses_gateway;
+ memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);
+
+ prim_lnk_idx = link->link_idx;
+ lnk_idx = link_new->link_idx;
+ mutex_lock(&lgr->rmbs_lock);
+ ext->num_rkeys = lgr->conns_num;
+ if (!ext->num_rkeys)
+ goto out;
+ buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ for (i = 0; i < ext->num_rkeys; i++) {
+ if (!buf_pos)
+ break;
+ rmb = buf_pos;
+ ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
+ ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
+ ext->rt[i].rmb_vaddr_new =
+ cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
+ buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+ while (buf_pos && !(buf_pos)->used)
+ buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
+ }
+ len += i * sizeof(ext->rt[0]);
+out:
+ mutex_unlock(&lgr->rmbs_lock);
+ return len;
+}
+
/* send ADD LINK request or response */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
struct smc_link *link_new,
enum smc_llc_reqresp reqresp)
{
+ struct smc_llc_msg_add_link_v2_ext *ext = NULL;
struct smc_llc_msg_add_link *addllc;
struct smc_wr_tx_pend_priv *pend;
- struct smc_wr_buf *wr_buf;
+ int len = sizeof(*addllc);
int rc;
if (!smc_wr_tx_link_hold(link))
return -ENOLINK;
- rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
- if (rc)
- goto put_out;
- addllc = (struct smc_llc_msg_add_link *)wr_buf;
+ if (link->lgr->smc_version == SMC_V2) {
+ struct smc_wr_v2_buf *wr_buf;
+
+ rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
+ if (rc)
+ goto put_out;
+ addllc = (struct smc_llc_msg_add_link *)wr_buf;
+ ext = (struct smc_llc_msg_add_link_v2_ext *)
+ &wr_buf->raw[sizeof(*addllc)];
+ memset(ext, 0, SMC_WR_TX_SIZE);
+ } else {
+ struct smc_wr_buf *wr_buf;
+
+ rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+ if (rc)
+ goto put_out;
+ addllc = (struct smc_llc_msg_add_link *)wr_buf;
+ }
memset(addllc, 0, sizeof(*addllc));
- addllc->hd.common.type = SMC_LLC_ADD_LINK;
- addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
+ addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
if (reqresp == SMC_LLC_RESP)
addllc->hd.flags |= SMC_LLC_FLAG_RESP;
memcpy(addllc->sender_mac, mac, ETH_ALEN);
@@@ -674,14 -516,8 +674,14 @@@
addllc->qp_mtu = min(link_new->path_mtu,
link_new->peer_mtu);
}
+ if (ext && link_new)
+ len += smc_llc_fill_ext_v2(ext, link, link_new);
+ smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
/* send llc message */
- rc = smc_wr_tx_send(link, pend);
+ if (link->lgr->smc_version == SMC_V2)
+ rc = smc_wr_tx_v2_send(link, pend, len);
+ else
+ rc = smc_wr_tx_send(link, pend);
put_out:
smc_wr_tx_link_put(link);
return rc;
@@@ -705,8 -541,8 +705,8 @@@ int smc_llc_send_delete_link(struct smc
delllc = (struct smc_llc_msg_del_link *)wr_buf;
memset(delllc, 0, sizeof(*delllc));
- delllc->hd.common.type = SMC_LLC_DELETE_LINK;
- delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
+ delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
+ smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
if (reqresp == SMC_LLC_RESP)
delllc->hd.flags |= SMC_LLC_FLAG_RESP;
if (orderly)
@@@ -738,8 -574,8 +738,8 @@@ static int smc_llc_send_test_link(struc
goto put_out;
testllc = (struct smc_llc_msg_test_link *)wr_buf;
memset(testllc, 0, sizeof(*testllc));
- testllc->hd.common.type = SMC_LLC_TEST_LINK;
- testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
+ testllc->hd.common.llc_type = SMC_LLC_TEST_LINK;
+ smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));
memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
/* send llc message */
rc = smc_wr_tx_send(link, pend);
@@@ -815,6 -651,44 +815,6 @@@ static int smc_llc_alloc_alt_link(struc
return -EMLINK;
}
-/* return first buffer from any of the next buf lists */
-static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
- int *buf_lst)
-{
- struct smc_buf_desc *buf_pos;
-
- while (*buf_lst < SMC_RMBE_SIZES) {
- buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
- struct smc_buf_desc, list);
- if (buf_pos)
- return buf_pos;
- (*buf_lst)++;
- }
- return NULL;
-}
-
-/* return next rmb from buffer lists */
-static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
- int *buf_lst,
- struct smc_buf_desc *buf_pos)
-{
- struct smc_buf_desc *buf_next;
-
- if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
- (*buf_lst)++;
- return _smc_llc_get_next_rmb(lgr, buf_lst);
- }
- buf_next = list_next_entry(buf_pos, list);
- return buf_next;
-}
-
-static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
- int *buf_lst)
-{
- *buf_lst = 0;
- return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
-}
-
/* send one add_link_continue msg */
static int smc_llc_add_link_cont(struct smc_link *link,
struct smc_link *link_new, u8 *num_rkeys_todo,
@@@ -860,7 -734,7 +860,7 @@@
while (*buf_pos && !(*buf_pos)->used)
*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
}
- addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
+ addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
if (lgr->role == SMC_CLNT)
addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
@@@ -919,8 -793,6 +919,8 @@@ static int smc_llc_cli_add_link_reject(
qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
+ smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
+ sizeof(qentry->msg));
return smc_llc_send_message(qentry->link, &qentry->msg);
}
@@@ -941,7 -813,7 +941,7 @@@ static int smc_llc_cli_conf_link(struc
SMC_LLC_DEL_LOST_PATH);
return -ENOLINK;
}
- if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
+ if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
/* received DELETE_LINK instead */
qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
smc_llc_send_message(link, &qentry->msg);
@@@ -982,26 -854,6 +982,26 @@@
return 0;
}
+static void smc_llc_save_add_link_rkeys(struct smc_link *link,
+ struct smc_link *link_new)
+{
+ struct smc_llc_msg_add_link_v2_ext *ext;
+ struct smc_link_group *lgr = link->lgr;
+ int max, i;
+
+ ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
+ SMC_WR_TX_SIZE);
+ max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+ mutex_lock(&lgr->rmbs_lock);
+ for (i = 0; i < max; i++) {
+ smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
+ ext->rt[i].rmb_key,
+ ext->rt[i].rmb_vaddr_new,
+ ext->rt[i].rmb_key_new);
+ }
+ mutex_unlock(&lgr->rmbs_lock);
+}
+
static void smc_llc_save_add_link_info(struct smc_link *link,
struct smc_llc_msg_add_link *add_llc)
{
@@@ -1018,47 -870,31 +1018,47 @@@ int smc_llc_cli_add_link(struct smc_lin
struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
struct smc_link_group *lgr = smc_get_lgr(link);
+ struct smc_init_info *ini = NULL;
struct smc_link *lnk_new = NULL;
- struct smc_init_info ini;
int lnk_idx, rc = 0;
if (!llc->qp_mtu)
goto out_reject;
- ini.vlan_id = lgr->vlan_id;
- smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini) {
+ rc = -ENOMEM;
+ goto out_reject;
+ }
+
+ ini->vlan_id = lgr->vlan_id;
+ if (lgr->smc_version == SMC_V2) {
+ ini->check_smcrv2 = true;
+ ini->smcrv2.saddr = lgr->saddr;
+ ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
+ }
+ smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
- !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
- if (!ini.ib_dev)
+ (lgr->smc_version == SMC_V2 ||
+ !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
+ if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
goto out_reject;
lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
}
- if (!ini.ib_dev) {
+ if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
- ini.ib_dev = link->smcibdev;
- ini.ib_port = link->ibport;
+ ini->smcrv2.ib_dev_v2 = link->smcibdev;
+ ini->smcrv2.ib_port_v2 = link->ibport;
+ } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
+ lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
+ ini->ib_dev = link->smcibdev;
+ ini->ib_port = link->ibport;
}
lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
if (lnk_idx < 0)
goto out_reject;
lnk_new = &lgr->lnk[lnk_idx];
- rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
+ rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
if (rc)
goto out_reject;
smc_llc_save_add_link_info(lnk_new, llc);
@@@ -1074,20 -910,16 +1074,20 @@@
goto out_clear_lnk;
rc = smc_llc_send_add_link(link,
- lnk_new->smcibdev->mac[ini.ib_port - 1],
+ lnk_new->smcibdev->mac[lnk_new->ibport - 1],
lnk_new->gid, lnk_new, SMC_LLC_RESP);
if (rc)
goto out_clear_lnk;
- rc = smc_llc_cli_rkey_exchange(link, lnk_new);
- if (rc) {
- rc = 0;
- goto out_clear_lnk;
+ if (lgr->smc_version == SMC_V2) {
+ smc_llc_save_add_link_rkeys(link, lnk_new);
+ } else {
+ rc = smc_llc_cli_rkey_exchange(link, lnk_new);
+ if (rc) {
+ rc = 0;
+ goto out_clear_lnk;
+ }
}
- rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
+ rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
if (!rc)
goto out;
out_clear_lnk:
@@@ -1096,78 -928,29 +1096,78 @@@
out_reject:
smc_llc_cli_add_link_reject(qentry);
out:
+ kfree(ini);
kfree(qentry);
return rc;
}
+static void smc_llc_send_request_add_link(struct smc_link *link)
+{
+ struct smc_llc_msg_req_add_link_v2 *llc;
+ struct smc_wr_tx_pend_priv *pend;
+ struct smc_wr_v2_buf *wr_buf;
+ struct smc_gidlist gidlist;
+ int rc, len, i;
+
+ if (!smc_wr_tx_link_hold(link))
+ return;
+ if (link->lgr->type == SMC_LGR_SYMMETRIC ||
+ link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+ goto put_out;
+
+ smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
+ if (gidlist.len <= 1)
+ goto put_out;
+
+ rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
+ if (rc)
+ goto put_out;
+ llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
+ memset(llc, 0, SMC_WR_TX_SIZE);
+
+ llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
+ for (i = 0; i < gidlist.len; i++)
+ memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
+ llc->gid_cnt = gidlist.len;
+ len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
+ smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
+ rc = smc_wr_tx_v2_send(link, pend, len);
+ if (!rc)
+ /* set REQ_ADD_LINK flow and wait for response from peer */
+ link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
+put_out:
+ smc_wr_tx_link_put(link);
+}
+
/* as an SMC client, invite server to start the add_link processing */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
struct smc_llc_qentry *qentry)
{
struct smc_link_group *lgr = smc_get_lgr(link);
- struct smc_init_info ini;
+ struct smc_init_info *ini = NULL;
+
+ if (lgr->smc_version == SMC_V2) {
+ smc_llc_send_request_add_link(link);
+ goto out;
+ }
if (lgr->type == SMC_LGR_SYMMETRIC ||
lgr->type == SMC_LGR_ASYMMETRIC_PEER)
goto out;
- ini.vlan_id = lgr->vlan_id;
- smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
- if (!ini.ib_dev)
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini)
+ goto out;
+
+ ini->vlan_id = lgr->vlan_id;
+ smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
+ if (!ini->ib_dev)
goto out;
- smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
- ini.ib_gid, NULL, SMC_LLC_REQ);
+ smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
+ ini->ib_gid, NULL, SMC_LLC_REQ);
out:
+ kfree(ini);
kfree(qentry);
}
@@@ -1183,7 -966,7 +1183,7 @@@ static bool smc_llc_is_empty_llc_messag
static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
- if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
+ if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
smc_llc_is_empty_llc_message(llc))
return true;
return false;
@@@ -1350,7 -1133,7 +1350,7 @@@ static int smc_llc_srv_conf_link(struc
/* receive CONFIRM LINK response over the RoCE fabric */
qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
if (!qentry ||
- qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
+ qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
/* send DELETE LINK */
smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
false, SMC_LLC_DEL_LOST_PATH);
@@@ -1369,80 -1152,37 +1369,80 @@@
return 0;
}
-int smc_llc_srv_add_link(struct smc_link *link)
+static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
+{
+ qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
+ smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
+ sizeof(qentry->msg));
+ memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
+ smc_llc_send_message(qentry->link, &qentry->msg);
+}
+
+int smc_llc_srv_add_link(struct smc_link *link,
+ struct smc_llc_qentry *req_qentry)
{
enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
struct smc_link_group *lgr = link->lgr;
struct smc_llc_msg_add_link *add_llc;
struct smc_llc_qentry *qentry = NULL;
- struct smc_link *link_new;
- struct smc_init_info ini;
+ bool send_req_add_link_resp = false;
+ struct smc_link *link_new = NULL;
+ struct smc_init_info *ini = NULL;
int lnk_idx, rc = 0;
+ if (req_qentry &&
+ req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
+ send_req_add_link_resp = true;
+
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
/* ignore client add link recommendation, start new flow */
- ini.vlan_id = lgr->vlan_id;
- smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
- if (!ini.ib_dev) {
+ ini->vlan_id = lgr->vlan_id;
+ if (lgr->smc_version == SMC_V2) {
+ ini->check_smcrv2 = true;
+ ini->smcrv2.saddr = lgr->saddr;
+ if (send_req_add_link_resp) {
+ struct smc_llc_msg_req_add_link_v2 *req_add =
+ &req_qentry->msg.req_add_link;
+
+ ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
+ }
+ }
+ smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
+ if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
+ lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
+ ini->smcrv2.ib_dev_v2 = link->smcibdev;
+ ini->smcrv2.ib_port_v2 = link->ibport;
+ } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
- ini.ib_dev = link->smcibdev;
- ini.ib_port = link->ibport;
+ ini->ib_dev = link->smcibdev;
+ ini->ib_port = link->ibport;
}
lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
- if (lnk_idx < 0)
- return 0;
+ if (lnk_idx < 0) {
+ rc = 0;
+ goto out;
+ }
- rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
+ rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
if (rc)
- return rc;
+ goto out;
link_new = &lgr->lnk[lnk_idx];
+
+ rc = smcr_buf_map_lgr(link_new);
+ if (rc)
+ goto out_err;
+
rc = smc_llc_send_add_link(link,
- link_new->smcibdev->mac[ini.ib_port - 1],
+ link_new->smcibdev->mac[link_new->ibport - 1],
link_new->gid, link_new, SMC_LLC_REQ);
if (rc)
goto out_err;
+ send_req_add_link_resp = false;
/* receive ADD LINK response over the RoCE fabric */
qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
if (!qentry) {
@@@ -1457,59 -1197,48 +1457,59 @@@
}
if (lgr->type == SMC_LGR_SINGLE &&
(!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
- !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
+ (lgr->smc_version == SMC_V2 ||
+ !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
}
smc_llc_save_add_link_info(link_new, add_llc);
smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
rc = smc_ib_ready_link(link_new);
- if (rc)
- goto out_err;
- rc = smcr_buf_map_lgr(link_new);
if (rc)
goto out_err;
rc = smcr_buf_reg_lgr(link_new);
if (rc)
goto out_err;
- rc = smc_llc_srv_rkey_exchange(link, link_new);
- if (rc)
- goto out_err;
+ if (lgr->smc_version == SMC_V2) {
+ smc_llc_save_add_link_rkeys(link, link_new);
+ } else {
+ rc = smc_llc_srv_rkey_exchange(link, link_new);
+ if (rc)
+ goto out_err;
+ }
rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
if (rc)
goto out_err;
+ kfree(ini);
return 0;
out_err:
- link_new->state = SMC_LNK_INACTIVE;
- smcr_link_clear(link_new, false);
+ if (link_new) {
+ link_new->state = SMC_LNK_INACTIVE;
+ smcr_link_clear(link_new, false);
+ }
+out:
+ kfree(ini);
+ if (send_req_add_link_resp)
+ smc_llc_send_req_add_link_response(req_qentry);
return rc;
}
static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
{
struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
+ struct smc_llc_qentry *qentry;
int rc;
- smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
+ qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
mutex_lock(&lgr->llc_conf_mutex);
- rc = smc_llc_srv_add_link(link);
+ rc = smc_llc_srv_add_link(link, qentry);
if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
/* delete any asymmetric link */
smc_llc_delete_asym_link(lgr);
}
mutex_unlock(&lgr->llc_conf_mutex);
+ kfree(qentry);
}
/* enqueue a local add_link req to trigger a new add_link flow */
@@@ -1517,8 -1246,8 +1517,8 @@@ void smc_llc_add_link_local(struct smc_
{
struct smc_llc_msg_add_link add_llc = {};
- add_llc.hd.length = sizeof(add_llc);
- add_llc.hd.common.type = SMC_LLC_ADD_LINK;
+ add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
+ smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
/* no dev and port needed */
smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
}
@@@ -1540,8 -1269,7 +1540,8 @@@ static void smc_llc_add_link_work(struc
else
smc_llc_process_srv_add_link(lgr);
out:
- smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
+ if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
+ smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
/* enqueue a local del_link msg to trigger a new del_link flow,
@@@ -1551,8 -1279,8 +1551,8 @@@ void smc_llc_srv_delete_link_local(stru
{
struct smc_llc_msg_del_link del_llc = {};
- del_llc.hd.length = sizeof(del_llc);
- del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
+ del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
+ smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
del_llc.link_num = del_link_id;
del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
@@@ -1622,8 -1350,8 +1622,8 @@@ void smc_llc_send_link_delete_all(struc
struct smc_llc_msg_del_link delllc = {};
int i;
- delllc.hd.common.type = SMC_LLC_DELETE_LINK;
- delllc.hd.length = sizeof(delllc);
+ delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
+ smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
if (ord)
delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
@@@ -1739,8 -1467,6 +1739,8 @@@ static void smc_llc_rmt_conf_rkey(struc
link = qentry->link;
num_entries = llc->rtoken[0].num_rkeys;
+ if (num_entries > SMC_LLC_RKEYS_PER_MSG)
+ goto out_err;
/* first rkey entry is for receiving link */
rk_idx = smc_rtoken_add(link,
llc->rtoken[0].rmb_vaddr,
@@@ -1759,7 -1485,6 +1759,7 @@@ out_err
llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
llc->hd.flags |= SMC_LLC_FLAG_RESP;
+ smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
smc_llc_send_message(link, &qentry->msg);
smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
@@@ -1777,28 -1502,6 +1777,28 @@@ static void smc_llc_rmt_delete_rkey(str
llc = &qentry->msg.delete_rkey;
link = qentry->link;
+ if (lgr->smc_version == SMC_V2) {
+ struct smc_llc_msg_delete_rkey_v2 *llcv2;
+
+ memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
+ llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
+ llcv2->num_inval_rkeys = 0;
+
+ max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+ for (i = 0; i < max; i++) {
+ if (smc_rtoken_delete(link, llcv2->rkey[i]))
+ llcv2->num_inval_rkeys++;
+ }
+ memset(&llc->rkey[0], 0, sizeof(llc->rkey));
+ memset(&llc->reserved2, 0, sizeof(llc->reserved2));
+ smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
+ if (llcv2->num_inval_rkeys) {
+ llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
+ llc->err_mask = llcv2->num_inval_rkeys;
+ }
+ goto finish;
+ }
+
max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
for (i = 0; i < max; i++) {
if (smc_rtoken_delete(link, llc->rkey[i]))
@@@ -1808,7 -1511,6 +1808,7 @@@
llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
llc->err_mask = err_mask;
}
+finish:
llc->hd.flags |= SMC_LLC_FLAG_RESP;
smc_llc_send_message(link, &qentry->msg);
smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
@@@ -1844,7 -1546,7 +1844,7 @@@ static void smc_llc_event_handler(struc
if (!smc_link_usable(link))
goto out;
- switch (llc->raw.hdr.common.type) {
+ switch (llc->raw.hdr.common.llc_type) {
case SMC_LLC_TEST_LINK:
llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
smc_llc_send_message(link, llc);
@@@ -1869,18 -1571,8 +1869,18 @@@
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
wake_up(&lgr->llc_msg_waiter);
- } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
- qentry)) {
+ return;
+ }
+ if (lgr->llc_flow_lcl.type ==
+ SMC_LLC_FLOW_REQ_ADD_LINK) {
+ /* server started add_link processing */
+ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
+ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
+ qentry);
+ schedule_work(&lgr->llc_add_link_work);
+ return;
+ }
+ if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
schedule_work(&lgr->llc_add_link_work);
}
} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
@@@ -1928,23 -1620,6 +1928,23 @@@
smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
}
return;
+ case SMC_LLC_REQ_ADD_LINK:
+ /* handle response here, smc_llc_flow_stop() cannot be called
+ * in tasklet context
+ */
+ if (lgr->role == SMC_CLNT &&
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
+ (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
+ smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
+ } else if (lgr->role == SMC_SERV) {
+ if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
+ /* as smc server, handle client suggestion */
+ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
+ schedule_work(&lgr->llc_add_link_work);
+ }
+ return;
+ }
+ break;
default:
smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
break;
@@@ -1988,7 -1663,7 +1988,7 @@@ static void smc_llc_rx_response(struct
{
enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
- u8 llc_type = qentry->msg.raw.hdr.common.type;
+ u8 llc_type = qentry->msg.raw.hdr.common.llc_type;
switch (llc_type) {
case SMC_LLC_TEST_LINK:
@@@ -2014,8 -1689,7 +2014,8 @@@
/* not used because max links is 3 */
break;
default:
- smc_llc_protocol_violation(link->lgr, llc_type);
+ smc_llc_protocol_violation(link->lgr,
+ qentry->msg.raw.hdr.common.type);
break;
}
kfree(qentry);
@@@ -2040,8 -1714,7 +2040,8 @@@ static void smc_llc_enqueue(struct smc_
memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
/* process responses immediately */
- if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
+ if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
+ llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
smc_llc_rx_response(link, qentry);
return;
}
@@@ -2061,13 -1734,8 +2061,13 @@@ static void smc_llc_rx_handler(struct i
if (wc->byte_len < sizeof(*llc))
return; /* short message */
- if (llc->raw.hdr.length != sizeof(*llc))
- return; /* invalid message */
+ if (!llc->raw.hdr.common.llc_version) {
+ if (llc->raw.hdr.length != sizeof(*llc))
+ return; /* invalid message */
+ } else {
+ if (llc->raw.hdr.length_v2 < sizeof(*llc))
+ return; /* invalid message */
+ }
smc_llc_enqueue(link, llc);
}
@@@ -2154,7 -1822,7 +2154,7 @@@ void smc_llc_link_active(struct smc_lin
link->smcibdev->ibdev->name, link->ibport);
link->state = SMC_LNK_ACTIVE;
if (link->lgr->llc_testlink_time) {
- link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
+ link->llc_testlink_time = link->lgr->llc_testlink_time;
schedule_delayed_work(&link->llc_testlink_wrk,
link->llc_testlink_time);
}
@@@ -2286,35 -1954,6 +2286,35 @@@ static struct smc_wr_rx_handler smc_llc
.handler = smc_llc_rx_handler,
.type = SMC_LLC_DELETE_RKEY
},
+ /* V2 types */
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_CONFIRM_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_TEST_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_ADD_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_DELETE_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_REQ_ADD_LINK_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_CONFIRM_RKEY_V2
+ },
+ {
+ .handler = smc_llc_rx_handler,
+ .type = SMC_LLC_DELETE_RKEY_V2
+ },
{
.handler = NULL,
}
diff --combined net/tls/tls_main.c
index 278192ee133e,9ab81db8a654..acfba9f1ba72
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@@ -421,88 -421,6 +421,88 @@@ static int do_tls_getsockopt_conf(struc
rc = -EFAULT;
break;
}
+ case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_ccm_128, info);
+
+ if (len != sizeof(*aes_ccm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(aes_ccm_128->iv,
+ cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_chacha20_poly1305,
+ info);
+
+ if (len != sizeof(*chacha20_poly1305)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(chacha20_poly1305->iv,
+ cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, chacha20_poly1305,
+ sizeof(*chacha20_poly1305)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_gcm, info);
+
+ if (len != sizeof(*sm4_gcm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_gcm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ TLS_CIPHER_SM4_GCM_IV_SIZE);
+ memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_ccm, info);
+
+ if (len != sizeof(*sm4_ccm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_ccm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ TLS_CIPHER_SM4_CCM_IV_SIZE);
+ memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ rc = -EFAULT;
+ break;
+ }
default:
rc = -EINVAL;
}
@@@ -606,12 -524,6 +606,12 @@@ static int do_tls_setsockopt_conf(struc
case TLS_CIPHER_CHACHA20_POLY1305:
optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
break;
+ case TLS_CIPHER_SM4_GCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+ break;
default:
rc = -EINVAL;
goto err_crypto_info;
@@@ -769,12 -681,12 +769,12 @@@ static void build_protos(struct proto p
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE
diff --combined net/tls/tls_sw.c
index 4147bb2e7057,1b08b877a890..d81564078557
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -35,6 -35,7 +35,7 @@@
* SOFTWARE.
*/
+ #include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/splice.h>
@@@ -43,6 -44,14 +44,14 @@@
#include <net/strparser.h>
#include <net/tls.h>
+ noinline void tls_err_abort(struct sock *sk, int err)
+ {
+ WARN_ON_ONCE(err >= 0);
+ /* sk->sk_err should contain a positive error code. */
+ sk->sk_err = -err;
+ sk_error_report(sk);
+ }
+
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
unsigned int recursion_level)
{
@@@ -419,7 -428,7 +428,7 @@@ int tls_tx_records(struct sock *sk, in
tx_err:
if (rc < 0 && rc != -EAGAIN)
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
return rc;
}
@@@ -450,7 -459,7 +459,7 @@@ static void tls_encrypt_done(struct cry
/* If err is already set on socket, return the same code */
if (sk->sk_err) {
- ctx->async_wait.err = sk->sk_err;
+ ctx->async_wait.err = -sk->sk_err;
} else {
ctx->async_wait.err = err;
tls_err_abort(sk, err);
@@@ -498,15 -507,9 +507,15 @@@ static int tls_do_encryption(struct soc
int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
}
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
@@@ -769,7 -772,7 +778,7 @@@ static int tls_push_record(struct sock
msg_pl->sg.size + prot->tail_size, i);
if (rc < 0) {
if (rc != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
if (split) {
tls_ctx->pending_open_record_frags = true;
tls_merge_open_record(sk, rec, tmp, orig_end);
@@@ -1463,16 -1466,10 +1472,16 @@@ static int decrypt_internal(struct soc
aad = (u8 *)(sgout + n_sgout);
iv = aad + prot->aad_size;
- /* For CCM based ciphers, first byte of nonce+iv is always '2' */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
- iv[0] = 2;
+ /* For CCM based ciphers, first byte of nonce+iv is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
}
/* Prepare IV */
@@@ -1839,7 -1836,7 +1848,7 @@@ int tls_sw_recvmsg(struct sock *sk
err = decrypt_skb_update(sk, skb, &msg->msg_iter,
&chunk, &zc, async_capable);
if (err < 0 && err != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
goto recv_end;
}
@@@ -2019,7 -2016,7 +2028,7 @@@ ssize_t tls_sw_splice_read(struct socke
}
if (err < 0) {
- tls_err_abort(sk, EBADMSG);
+ tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
ctx->decrypted = 1;
@@@ -2038,7 -2035,7 +2047,7 @@@ splice_read_end
return copied ? : err;
}
- bool tls_sw_stream_read(const struct sock *sk)
+ bool tls_sw_sock_is_readable(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -2436,40 -2433,6 +2445,40 @@@ int tls_set_sw_offload(struct sock *sk
cipher_name = "rfc7539(chacha20,poly1305)";
break;
}
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+ sm4_gcm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ iv = sm4_gcm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+ rec_seq = sm4_gcm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+ key = sm4_gcm_info->key;
+ salt = sm4_gcm_info->salt;
+ salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+ cipher_name = "gcm(sm4)";
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+ sm4_ccm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ iv = sm4_ccm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+ rec_seq = sm4_ccm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+ key = sm4_ccm_info->key;
+ salt = sm4_ccm_info->salt;
+ salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+ cipher_name = "ccm(sm4)";
+ break;
+ }
default:
rc = -EINVAL;
goto free_priv;
diff --combined net/wireless/core.c
index 45be124a98f1,aaba847d79eb..eb297e1015e0
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@@ -524,6 -524,7 +524,7 @@@ use_default_name
INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
INIT_WORK(&rdev->mgmt_registrations_update_wk,
cfg80211_mgmt_registrations_update_wk);
+ spin_lock_init(&rdev->mgmt_registrations_lock);
#ifdef CONFIG_CFG80211_DEFAULT_PS
rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@@ -1080,16 -1081,6 +1081,16 @@@ void cfg80211_dev_free(struct cfg80211_
list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&rdev->wiphy, &scan->pub);
mutex_destroy(&rdev->wiphy.mtx);
+
+ /*
+ * The 'regd' can only be non-NULL if we never finished
+ * initializing the wiphy and thus never went through the
+ * unregister path - e.g. in failure scenarios. Thus, it
+ * cannot have been visible to anyone if non-NULL, so we
+ * can just free it here.
+ */
+ kfree(rcu_dereference_raw(rdev->wiphy.regd));
+
kfree(rdev);
}
@@@ -1289,7 -1280,6 +1290,6 @@@ void cfg80211_init_wdev(struct wireless
INIT_LIST_HEAD(&wdev->event_list);
spin_lock_init(&wdev->event_lock);
INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
INIT_LIST_HEAD(&wdev->pmsr_list);
spin_lock_init(&wdev->pmsr_lock);
INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
diff --combined net/wireless/scan.c
index e4f79b23f7f6,adc0d14cfd86..22e92be61938
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@@ -383,7 -383,7 +383,7 @@@ static bool is_bss(struct cfg80211_bss
const u8 *ssid, size_t ssid_len)
{
const struct cfg80211_bss_ies *ies;
- const u8 *ssidie;
+ const struct element *ssid_elem;
if (bssid && !ether_addr_equal(a->bssid, bssid))
return false;
@@@ -394,12 -394,12 +394,12 @@@
ies = rcu_access_pointer(a->ies);
if (!ies)
return false;
- ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
- if (!ssidie)
+ ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
+ if (!ssid_elem)
return false;
- if (ssidie[1] != ssid_len)
+ if (ssid_elem->datalen != ssid_len)
return false;
- return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+ return memcmp(ssid_elem->data, ssid, ssid_len) == 0;
}
static int
@@@ -418,14 -418,17 +418,17 @@@ cfg80211_add_nontrans_list(struct cfg80
}
ssid_len = ssid[1];
ssid = ssid + 2;
- rcu_read_unlock();
/* check if nontrans_bss is in the list */
list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
- if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+ if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+ rcu_read_unlock();
return 0;
+ }
}
+ rcu_read_unlock();
+
/* add to the list */
list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
return 0;
@@@ -1791,13 -1794,25 +1794,13 @@@ cfg80211_bss_update(struct cfg80211_reg
return NULL;
}
-/*
- * Update RX channel information based on the available frame payload
- * information. This is mainly for the 2.4 GHz band where frames can be received
- * from neighboring channels and the Beacon frames use the DSSS Parameter Set
- * element to indicate the current (transmitting) channel, but this might also
- * be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
- */
-static struct ieee80211_channel *
-cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
- struct ieee80211_channel *channel,
- enum nl80211_bss_scan_width scan_width)
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band)
{
const u8 *tmp;
- u32 freq;
int channel_number = -1;
- struct ieee80211_channel *alt_channel;
- if (channel->band == NL80211_BAND_S1GHZ) {
+ if (band == NL80211_BAND_S1GHZ) {
tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
@@@ -1818,29 -1833,6 +1821,29 @@@
}
}
+ return channel_number;
+}
+EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
+
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
+static struct ieee80211_channel *
+cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width)
+{
+ u32 freq;
+ int channel_number;
+ struct ieee80211_channel *alt_channel;
+
+ channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+
if (channel_number < 0) {
/* No channel information in frame payload */
return channel;
@@@ -2083,12 -2075,12 +2086,12 @@@ static void cfg80211_parse_mbssid_data(
if (!non_tx_data)
return;
- if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return;
if (!wiphy->support_mbssid)
return;
if (wiphy->support_only_he_mbssid &&
- !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
return;
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
@@@ -2455,10 -2447,10 +2458,10 @@@ cfg80211_inform_bss_frame_data(struct w
res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
len, gfp);
if (!res || !wiphy->support_mbssid ||
- !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return res;
if (wiphy->support_only_he_mbssid &&
- !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
return res;
non_tx_data.tx_bss = res;
diff --combined net/wireless/util.c
index 2991f711491a,a1a99a574984..5ff1f8726faf
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -80,7 -80,6 +80,7 @@@ u32 ieee80211_channel_to_freq_khz(int c
return 0; /* not supported */
switch (band) {
case NL80211_BAND_2GHZ:
+ case NL80211_BAND_LC:
if (chan == 14)
return MHZ_TO_KHZ(2484);
else if (chan < 14)
@@@ -210,7 -209,6 +210,7 @@@ static void set_mandatory_flags_band(st
WARN_ON(want);
break;
case NL80211_BAND_2GHZ:
+ case NL80211_BAND_LC:
want = 7;
for (i = 0; i < sband->n_bitrates; i++) {
switch (sband->bitrates[i].bitrate) {
@@@ -1030,14 -1028,14 +1030,14 @@@ int cfg80211_change_iface(struct cfg802
!(rdev->wiphy.interface_modes & (1 << ntype)))
return -EOPNOTSUPP;
- /* if it's part of a bridge, reject changing type to station/ibss */
- if (netif_is_bridge_port(dev) &&
- (ntype == NL80211_IFTYPE_ADHOC ||
- ntype == NL80211_IFTYPE_STATION ||
- ntype == NL80211_IFTYPE_P2P_CLIENT))
- return -EBUSY;
-
if (ntype != otype) {
+ /* if it's part of a bridge, reject changing type to station/ibss */
+ if (netif_is_bridge_port(dev) &&
+ (ntype == NL80211_IFTYPE_ADHOC ||
+ ntype == NL80211_IFTYPE_STATION ||
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
+ return -EBUSY;
+
dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
wdev_lock(dev->ieee80211_ptr);
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20211028
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 440ffcdd9db4758f1503a25fb49a8e15ca83d6bc
Merge: 19fa0887c57d35b57bfb895e6caf8e72d9601ec0 54713c85f536048e685258f880bf298a74c3620d
Author: Jakub Kicinski <kuba(a)kernel.org>
Date: Tue Oct 26 14:38:54 2021 -0700
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2021-10-26
We've added 12 non-merge commits during the last 7 day(s) which contain
a total of 23 files changed, 118 insertions(+), 98 deletions(-).
The main changes are:
1) Fix potential race window in BPF tail call compatibility check, from Toke Høiland-Jørgensen.
2) Fix memory leak in cgroup fs due to missing cgroup_bpf_offline(), from Quanyang Wang.
3) Fix file descriptor reference counting in generic_map_update_batch(), from Xu Kuohai.
4) Fix bpf_jit_limit knob to the max supported limit by the arch's JIT, from Lorenz Bauer.
5) Fix BPF sockmap ->poll callbacks for UDP and AF_UNIX sockets, from Cong Wang and Yucong Sun.
6) Fix BPF sockmap concurrency issue in TCP on non-blocking sendmsg calls, from Liu Jian.
7) Fix build failure of INODE_STORAGE and TASK_STORAGE maps on !CONFIG_NET, from Tejun Heo.
* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
bpf: Fix potential race in tail call compatibility check
bpf: Move BPF_MAP_TYPE for INODE_STORAGE and TASK_STORAGE outside of CONFIG_NET
selftests/bpf: Use recv_timeout() instead of retries
net: Implement ->sock_is_readable() for UDP and AF_UNIX
skmsg: Extract and reuse sk_msg_is_readable()
net: Rename ->stream_memory_read to ->sock_is_readable
tcp_bpf: Fix one concurrency problem in the tcp_bpf_send_verdict function
cgroup: Fix memory leak caused by missing cgroup_bpf_offline
bpf: Fix error usage of map_fd and fdget() in generic_map_update_batch()
bpf: Prevent increasing bpf_jit_limit above max
bpf: Define bpf_jit_alloc_exec_limit for arm64 JIT
bpf: Define bpf_jit_alloc_exec_limit for riscv JIT
====================
Link: https://lore.kernel.org/r/20211026201920.11296-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
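For reference, the callback rename called out above (7b50ecfcc6cd, "net: Rename
->stream_memory_read to ->sock_is_readable") leaves each protocol with a single
readability hook on struct proto, as the net/tls/tls_main.c hunks earlier in
this batch show. A minimal sketch of wiring that hook up; my_proto,
my_sock_is_readable and the receive-queue check are illustrative assumptions,
not code from this series:

#include <net/sock.h>

/* Hypothetical protocol: report whether a socket is readable. After the
 * rename, TLS, UDP and AF_UNIX sockmap all implement this one hook in
 * place of the stream-only ->stream_memory_read. */
static bool my_sock_is_readable(struct sock *sk)
{
	/* Illustrative check only: readable when the receive queue
	 * holds at least one skb. */
	return !skb_queue_empty_lockless(&sk->sk_receive_queue);
}

static struct proto my_proto = {
	.name			= "MYPROTO",		/* hypothetical */
	.sock_is_readable	= my_sock_is_readable,	/* renamed hook */
};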
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20211028
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit c30eb0084783beafe602cbcf368367d2e4f24d17
Merge: 41cc40eee37048f315a5fa8e7b2df2ea5810a256 f25c0515c521375154c62c72447869f40218c861
Author: Stephen Rothwell <sfr(a)canb.auug.org.au>
Date: Thu Oct 28 11:54:18 2021 +1100
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
diff --combined Documentation/devicetree/bindings/vendor-prefixes.yaml
index fe9615089acb,e2eb0c738f3a..a348fccf3d0a
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@@ -131,6 -131,8 +131,8 @@@ patternProperties
description: Asahi Kasei Corp.
"^asc,.*":
description: All Sensors Corporation
+ "^asix,.*":
+ description: ASIX Electronics Corporation
"^aspeed,.*":
description: ASPEED Technology Inc.
"^asus,.*":
@@@ -191,8 -193,6 +193,8 @@@
description: B&R Industrial Automation GmbH
"^bticino,.*":
description: Bticino International
+ "^calamp,.*":
+ description: CalAmp Corp.
"^calaosystems,.*":
description: CALAO Systems SAS
"^calxeda,.*":
@@@ -337,8 -337,6 +339,8 @@@
description: EBV Elektronik
"^eckelmann,.*":
description: Eckelmann AG
+ "^edimax,.*":
+ description: EDIMAX Technology Co., Ltd
"^edt,.*":
description: Emerging Display Technologies
"^eeti,.*":
@@@ -357,8 -355,6 +359,8 @@@
description: Shenzhen Elida Technology Co., Ltd.
"^elimo,.*":
description: Elimo Engineering Ltd.
+ "^elpida,.*":
+ description: Elpida Memory, Inc.
"^embest,.*":
description: Shenzhen Embest Technology Co., Ltd.
"^emlid,.*":
@@@ -401,8 -397,6 +403,8 @@@
description: Exar Corporation
"^excito,.*":
description: Excito
+ "^exegin,.*":
+ description: Exegin Technologies Limited
"^ezchip,.*":
description: EZchip Semiconductor
"^facebook,.*":
@@@ -517,8 -511,6 +519,8 @@@
description: Hycon Technology Corp.
"^hydis,.*":
description: Hydis Technologies
+ "^hynix,.*":
+ description: SK Hynix Inc.
"^hyundai,.*":
description: Hyundai Technology
"^i2se,.*":
@@@ -587,8 -579,6 +589,8 @@@
description: JEDEC Solid State Technology Association
"^jesurun,.*":
description: Shenzhen Jesurun Electronics Business Dept.
+ "^jethome,.*":
+ description: JetHome (IP Sokolov P.A.)
"^jianda,.*":
description: Jiandangjing Technology Co., Ltd.
"^kam,.*":
@@@ -1114,8 -1104,6 +1116,8 @@@
description: SparkFun Electronics
"^sprd,.*":
description: Spreadtrum Communications Inc.
+ "^ssi,.*":
+ description: SSI Computer Corp
"^sst,.*":
description: Silicon Storage Technology, Inc.
"^sstar,.*":
diff --combined MAINTAINERS
index 99e5c7702cca,975086c5345d..524ef8e72339
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -334,7 -334,7 +334,7 @@@ F: drivers/platform/x86/acer-wmi.
ACPI
M: "Rafael J. Wysocki" <rafael(a)kernel.org>
-M: Len Brown <lenb(a)kernel.org>
+R: Len Brown <lenb(a)kernel.org>
L: linux-acpi(a)vger.kernel.org
S: Supported
W: https://01.org/linux-acpi
@@@ -355,7 -355,7 +355,7 @@@ F: tools/power/acpi
ACPI APEI
M: "Rafael J. Wysocki" <rafael(a)kernel.org>
-M: Len Brown <lenb(a)kernel.org>
+R: Len Brown <lenb(a)kernel.org>
R: James Morse <james.morse(a)arm.com>
R: Tony Luck <tony.luck(a)intel.com>
R: Borislav Petkov <bp(a)alien8.de>
@@@ -378,6 -378,14 +378,6 @@@ F: drivers/acpi/acpica
F: include/acpi/
F: tools/power/acpi/
-ACPI FAN DRIVER
-M: Zhang Rui <rui.zhang(a)intel.com>
-L: linux-acpi(a)vger.kernel.org
-S: Supported
-W: https://01.org/linux-acpi
-B: https://bugzilla.kernel.org
-F: drivers/acpi/fan.c
-
ACPI FOR ARM64 (ACPI/arm64)
M: Lorenzo Pieralisi <lorenzo.pieralisi(a)arm.com>
M: Hanjun Guo <guohanjun(a)huawei.com>
@@@ -414,6 -422,14 +414,6 @@@ W: https://01.org/linux-acp
B: https://bugzilla.kernel.org
F: drivers/acpi/*thermal*
-ACPI VIDEO DRIVER
-M: Zhang Rui <rui.zhang(a)intel.com>
-L: linux-acpi(a)vger.kernel.org
-S: Supported
-W: https://01.org/linux-acpi
-B: https://bugzilla.kernel.org
-F: drivers/acpi/acpi_video.c
-
ACPI VIOT DRIVER
M: Jean-Philippe Brucker <jean-philippe(a)linaro.org>
L: linux-acpi(a)vger.kernel.org
@@@ -1266,13 -1282,6 +1266,13 @@@ S: Maintaine
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
F: drivers/iommu/apple-dart.c
+APPLE PCIE CONTROLLER DRIVER
+M: Alyssa Rosenzweig <alyssa(a)rosenzweig.io>
+M: Marc Zyngier <maz(a)kernel.org>
+L: linux-pci(a)vger.kernel.org
+S: Maintained
+F: drivers/pci/controller/pcie-apple.c
+
APPLE SMC DRIVER
M: Henrik Rydberg <rydberg(a)bitmath.org>
L: linux-hwmon(a)vger.kernel.org
@@@ -1713,14 -1722,10 +1713,14 @@@ B: https://github.com/AsahiLinux/linux/
C: irc://irc.oftc.net/asahi-dev
T: git https://github.com/AsahiLinux/linux.git
F: Documentation/devicetree/bindings/arm/apple.yaml
+F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml
F: Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
F: arch/arm64/boot/dts/apple/
+F: drivers/i2c/busses/i2c-pasemi-core.c
+F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/irqchip/irq-apple-aic.c
+F: drivers/pinctrl/pinctrl-apple-gpio.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
@@@ -2307,14 -2312,6 +2307,14 @@@ F: arch/arm/boot/dts/nuvoton-wpcm450
F: arch/arm/mach-npcm/wpcm450.c
F: drivers/*/*wpcm*
+ARM/NXP S32G ARCHITECTURE
+M: Chester Lin <clin(a)suse.com>
+R: Andreas Färber <afaerber(a)suse.de>
+R: Matthias Brugger <mbrugger(a)suse.com>
+L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm64/boot/dts/freescale/s32g*.dts*
+
ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
L: openmoko-kernel(a)lists.openmoko.org (subscribers-only)
S: Orphan
@@@ -2902,6 -2899,12 +2902,12 @@@ S: Maintaine
F: Documentation/hwmon/asc7621.rst
F: drivers/hwmon/asc7621.c
+ ASIX AX88796C SPI ETHERNET ADAPTER
+ M: Łukasz Stelmach <l.stelmach(a)samsung.com>
+ S: Maintained
+ F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml
+ F: drivers/net/ethernet/asix/ax88796c_*
+
ASPEED PINCTRL DRIVERS
M: Andrew Jeffery <andrew(a)aj.id.au>
L: linux-aspeed(a)lists.ozlabs.org (moderated for non-subscribers)
@@@ -4439,7 -4442,7 +4445,7 @@@ N: cros_e
N: cros-ec
CHRONTEL CH7322 CEC DRIVER
-M: Jeff Chase <jnchase(a)google.com>
+M: Joe Tessler <jrt(a)google.com>
L: linux-media(a)vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@@ -5461,19 -5464,6 +5467,19 @@@ F: include/net/devlink.
F: include/uapi/linux/devlink.h
F: net/core/devlink.c
+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+M: Christoph Niedermaier <cniedermaier(a)dh-electronics.com>
+L: kernel(a)dh-electronics.com
+S: Maintained
+F: arch/arm/boot/dts/imx6*-dhcom-*
+
+DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
+M: Marek Vasut <marex(a)denx.de>
+L: kernel(a)dh-electronics.com
+S: Maintained
+F: arch/arm/boot/dts/stm32mp1*-dhcom-*
+F: arch/arm/boot/dts/stm32mp1*-dhcor-*
+
DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource(a)diasemi.com>
S: Supported
@@@ -6308,7 -6298,6 +6314,7 @@@ L: linux-tegra(a)vger.kernel.or
S: Supported
T: git git://anongit.freedesktop.org/tegra/linux.git
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+F: Documentation/devicetree/bindings/gpu/host1x/
F: drivers/gpu/drm/tegra/
F: drivers/gpu/host1x/
F: include/linux/host1x.h
@@@ -7029,7 -7018,6 +7035,6 @@@ F: drivers/net/mdio/fwnode_mdio.
F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
- F: drivers/of/of_net.c
F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
F: include/linux/mdio/*.h
@@@ -7041,6 -7029,7 +7046,7 @@@ F: include/linux/platform_data/mdio-gpi
F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
+ F: net/core/of_net.c
EXFAT FILE SYSTEM
M: Namjae Jeon <linkinjeon(a)kernel.org>
@@@ -8215,7 -8204,7 +8221,7 @@@ T: git git://linuxtv.org/anttip/media_t
F: drivers/media/usb/hackrf/
HANTRO VPU CODEC DRIVER
-M: Ezequiel Garcia <ezequiel(a)collabora.com>
+M: Ezequiel Garcia <ezequiel(a)vanguardiasur.com.ar>
M: Philipp Zabel <p.zabel(a)pengutronix.de>
L: linux-media(a)vger.kernel.org
L: linux-rockchip(a)lists.infradead.org
@@@ -8683,12 -8672,6 +8689,12 @@@ S: Maintaine
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/hi556.c
+HYNIX HI846 SENSOR DRIVER
+M: Martin Kepplinger <martin.kepplinger(a)puri.sm>
+L: linux-media(a)vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/hi846.c
+
Hyper-V/Azure CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys(a)microsoft.com>
M: Haiyang Zhang <haiyangz(a)microsoft.com>
@@@ -10024,7 -10007,6 +10030,7 @@@ JC42.4 TEMPERATURE SENSOR DRIVE
M: Guenter Roeck <linux(a)roeck-us.net>
L: linux-hwmon(a)vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/hwmon/jedec,jc42.yaml
F: Documentation/hwmon/jc42.rst
F: drivers/hwmon/jc42.c
@@@ -10064,7 -10046,6 +10070,7 @@@ F: include/linux/jbd2.
JPU V4L2 MEM2MEM DRIVER FOR RENESAS
M: Mikhail Ulyanov <mikhail.ulyanov(a)cogentembedded.com>
L: linux-media(a)vger.kernel.org
+L: linux-renesas-soc(a)vger.kernel.org
S: Maintained
F: drivers/media/platform/rcar_jpu.c
@@@ -10919,7 -10900,7 +10925,7 @@@ LM90 HARDWARE MONITOR DRIVE
M: Jean Delvare <jdelvare(a)suse.com>
L: linux-hwmon(a)vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/hwmon/lm90.txt
+F: Documentation/devicetree/bindings/hwmon/national,lm90.yaml
F: Documentation/hwmon/lm90.rst
F: drivers/hwmon/lm90.c
F: include/dt-bindings/thermal/lm90.h
@@@ -11303,6 -11284,7 +11309,6 @@@ F: Documentation/networking/device_driv
F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER
-M: Vadym Kochan <vkochan(a)marvell.com>
M: Taras Chornyi <tchornyi(a)marvell.com>
S: Supported
W: https://github.com/Marvell-switching/switchdev-prestera
@@@ -11703,7 -11685,6 +11709,7 @@@ T: git git://linuxtv.org/media_tree.gi
F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
F: Documentation/devicetree/bindings/media/renesas,isp.yaml
F: Documentation/devicetree/bindings/media/renesas,vin.yaml
+F: drivers/media/platform/rcar-isp.c
F: drivers/media/platform/rcar-vin/
MEDIA DRIVERS FOR RENESAS - VSP1
@@@ -11849,7 -11830,9 +11855,9 @@@ F: drivers/mmc/host/mtk-sd.
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd(a)nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83(a)gmail.com>
- R: Ryder Lee <ryder.lee(a)mediatek.com>
+ M: Ryder Lee <ryder.lee(a)mediatek.com>
+ R: Shayne Chen <shayne.chen(a)mediatek.com>
+ R: Sean Wang <sean.wang(a)mediatek.com>
L: linux-wireless(a)vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
@@@ -11873,12 -11856,6 +11881,12 @@@ S: Maintaine
F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
F: drivers/i2c/busses/i2c-mt7621.c
+MEDIATEK MT7621 PCIE CONTROLLER DRIVER
+M: Sergio Paracuellos <sergio.paracuellos(a)gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
+F: drivers/pci/controller/pcie-mt7621.c
+
MEDIATEK MT7621 PHY PCI DRIVER
M: Sergio Paracuellos <sergio.paracuellos(a)gmail.com>
S: Maintained
@@@ -11902,14 -11879,6 +11910,14 @@@ M: Sean Wang <sean.wang(a)mediatek.com
S: Maintained
F: drivers/char/hw_random/mtk-rng.c
+MEDIATEK SMI DRIVER
+M: Yong Wu <yong.wu(a)mediatek.com>
+L: linux-mediatek(a)lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/memory-controllers/mediatek,smi*
+F: drivers/memory/mtk-smi.c
+F: include/soc/mediatek/smi.h
+
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang(a)mediatek.com>
M: Landen Chao <Landen.Chao(a)mediatek.com>
@@@ -12793,7 -12762,6 +12801,7 @@@ M: Laurent Pinchart <laurent.pinchart@i
L: linux-media(a)vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml
F: drivers/media/i2c/mt9p031.c
F: include/media/i2c/mt9p031.h
@@@ -13088,6 -13056,7 +13096,7 @@@ F: include/linux/dsa
F: include/linux/platform_data/dsa.h
F: include/net/dsa.h
F: net/dsa/
+ F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem(a)davemloft.net>
@@@ -13300,12 -13269,6 +13309,12 @@@ W: http://www.netlab.is.tsukuba.ac.jp/~
F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/nsp32*
+NINTENDO HID DRIVER
+M: Daniel J. Ogorchock <djogorchock(a)gmail.com>
+L: linux-input(a)vger.kernel.org
+S: Maintained
+F: drivers/hid/hid-nintendo*
+
NIOS2 ARCHITECTURE
M: Dinh Nguyen <dinguyen(a)kernel.org>
S: Maintained
@@@ -13852,13 -13815,6 +13861,13 @@@ S: Maintaine
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/ov13858.c
+OMNIVISION OV13B10 SENSOR DRIVER
+M: Arec Kao <arec.kao(a)intel.com>
+L: linux-media(a)vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/i2c/ov13b10.c
+
OMNIVISION OV2680 SENSOR DRIVER
M: Rui Miguel Silva <rmfrfs(a)gmail.com>
L: linux-media(a)vger.kernel.org
@@@ -14655,15 -14611,7 +14664,15 @@@ M: Stanimir Varbanov <svarbanov@mm-sol.
L: linux-pci(a)vger.kernel.org
L: linux-arm-msm(a)vger.kernel.org
S: Maintained
-F: drivers/pci/controller/dwc/*qcom*
+F: drivers/pci/controller/dwc/pcie-qcom.c
+
+PCIE ENDPOINT DRIVER FOR QUALCOMM
+M: Manivannan Sadhasivam <manivannan.sadhasivam(a)linaro.org>
+L: linux-pci(a)vger.kernel.org
+L: linux-arm-msm(a)vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCIE DRIVER FOR ROCKCHIP
M: Shawn Lin <shawn.lin(a)rock-chips.com>
@@@ -14920,6 -14868,13 +14929,6 @@@ L: linux-omap(a)vger.kernel.or
S: Maintained
F: drivers/pinctrl/pinctrl-single.c
-PIN CONTROLLER - ST SPEAR
-M: Viresh Kumar <vireshk(a)kernel.org>
-L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-W: http://www.st.com/spear
-F: drivers/pinctrl/spear/
-
PKTCDVD DRIVER
M: linux-block(a)vger.kernel.org
S: Orphan
@@@ -14968,6 -14923,7 +14977,6 @@@ S: Maintaine
W: http://hwmon.wiki.kernel.org/
W: http://www.roeck-us.net/linux/drivers/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
-F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
F: Documentation/devicetree/bindings/hwmon/ltc2978.txt
F: Documentation/devicetree/bindings/hwmon/max31785.txt
F: Documentation/hwmon/adm1275.rst
@@@ -15516,6 -15472,7 +15525,7 @@@ M: ath9k-devel(a)qca.qualcomm.co
L: linux-wireless(a)vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
+ F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER
@@@ -15937,6 -15894,12 +15947,12 @@@ L: linux-wireless(a)vger.kernel.or
S: Maintained
F: drivers/net/wireless/realtek/rtw88/
+ REALTEK WIRELESS DRIVER (rtw89)
+ M: Ping-Ke Shih <pkshih(a)realtek.com>
+ L: linux-wireless(a)vger.kernel.org
+ S: Maintained
+ F: drivers/net/wireless/realtek/rtw89/
+
REDPINE WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar(a)gmail.com>
M: Siva Rebbagondla <siva8118(a)gmail.com>
@@@ -16171,7 -16134,7 +16187,7 @@@ F: include/uapi/linux/rkisp1-config.
ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
M: Jacob Chen <jacob-chen(a)iotwrt.com>
-M: Ezequiel Garcia <ezequiel(a)collabora.com>
+M: Ezequiel Garcia <ezequiel(a)vanguardiasur.com.ar>
L: linux-media(a)vger.kernel.org
L: linux-rockchip(a)lists.infradead.org
S: Maintained
@@@ -16179,7 -16142,7 +16195,7 @@@ F: Documentation/devicetree/bindings/me
F: drivers/media/platform/rockchip/rga/
ROCKCHIP VIDEO DECODER DRIVER
-M: Ezequiel Garcia <ezequiel(a)collabora.com>
+M: Ezequiel Garcia <ezequiel(a)vanguardiasur.com.ar>
L: linux-media(a)vger.kernel.org
L: linux-rockchip(a)lists.infradead.org
S: Maintained
@@@ -17684,17 -17647,21 +17700,17 @@@ W: https://github.com/linux-speakup/spe
B: https://github.com/linux-speakup/speakup/issues
F: drivers/accessibility/speakup/
-SPEAR CLOCK FRAMEWORK SUPPORT
-M: Viresh Kumar <vireshk(a)kernel.org>
-L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-W: http://www.st.com/spear
-F: drivers/clk/spear/
-
-SPEAR PLATFORM SUPPORT
+SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT
M: Viresh Kumar <vireshk(a)kernel.org>
M: Shiraz Hashim <shiraz.linux.kernel(a)gmail.com>
+M: soc(a)kernel.org
L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://www.st.com/spear
F: arch/arm/boot/dts/spear*
F: arch/arm/mach-spear/
+F: drivers/clk/spear/
+F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM
M: Tudor Ambarus <tudor.ambarus(a)microchip.com>
@@@ -18611,9 -18578,7 +18627,9 @@@ L: linux-pm(a)vger.kernel.or
S: Supported
Q: https://patchwork.kernel.org/project/linux-pm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
+F: Documentation/ABI/testing/sysfs-class-thermal
F: Documentation/devicetree/bindings/thermal/
+F: Documentation/driver-api/thermal/
F: drivers/thermal/
F: include/linux/cpu_cooling.h
F: include/linux/thermal.h
@@@ -20387,7 -20352,6 +20403,7 @@@ X86 ARCHITECTURE (32-BIT AND 64-BIT
M: Thomas Gleixner <tglx(a)linutronix.de>
M: Ingo Molnar <mingo(a)redhat.com>
M: Borislav Petkov <bp(a)alien8.de>
+M: Dave Hansen <dave.hansen(a)linux.intel.com>
M: x86(a)kernel.org
R: "H. Peter Anvin" <hpa(a)zytor.com>
L: linux-kernel(a)vger.kernel.org
diff --combined drivers/infiniband/hw/mlx4/main.c
index 38742e233915,f3fa2fe6a88a..ceca05982f61
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@@ -2105,10 -2105,10 +2105,10 @@@ mlx4_ib_alloc_hw_device_stats(struct ib
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name)
+ if (!diag[0].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@@ -2118,10 -2118,10 +2118,10 @@@ mlx4_ib_alloc_hw_port_stats(struct ib_d
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name)
+ if (!diag[1].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@@ -2151,8 -2151,10 +2151,8 @@@ static int mlx4_ib_get_hw_stats(struct
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
- const char ***name,
- u32 **offset,
- u32 *num,
- bool port)
+ struct rdma_stat_desc **pdescs,
+ u32 **offset, u32 *num, bool port)
{
u32 num_counters;
@@@ -2164,46 -2166,46 +2164,46 @@@
if (!port)
num_counters += ARRAY_SIZE(diag_device_only);
- *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
- if (!*name)
+ *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc),
+ GFP_KERNEL);
+ if (!*pdescs)
return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
if (!*offset)
- goto err_name;
+ goto err;
*num = num_counters;
return 0;
-err_name:
- kfree(*name);
+err:
+ kfree(*pdescs);
return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
- const char **name,
- u32 *offset,
- bool port)
+ struct rdma_stat_desc *descs,
+ u32 *offset, bool port)
{
int i;
int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
- name[i] = diag_basic[i].name;
+ descs[i].name = diag_basic[i].name;
offset[i] = diag_basic[i].offset;
}
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
- name[j] = diag_ext[i].name;
+ descs[j].name = diag_ext[i].name;
offset[j] = diag_ext[i].offset;
}
}
if (!port) {
for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
- name[j] = diag_device_only[i].name;
+ descs[j].name = diag_device_only[i].name;
offset[j] = diag_device_only[i].offset;
}
}
@@@ -2231,13 -2233,13 +2231,13 @@@ static int mlx4_ib_alloc_diag_counters(
if (i && !per_port)
continue;
- ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
&diag[i].offset,
&diag[i].num_counters, i);
if (ret)
goto err_alloc;
- mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
diag[i].offset, i);
}
@@@ -2247,7 -2249,7 +2247,7 @@@
err_alloc:
if (i) {
- kfree(diag[i - 1].name);
+ kfree(diag[i - 1].descs);
kfree(diag[i - 1].offset);
}
@@@ -2260,7 -2262,7 +2260,7 @@@ static void mlx4_ib_diag_cleanup(struc
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
kfree(ibdev->diag_counters[i].offset);
- kfree(ibdev->diag_counters[i].name);
+ kfree(ibdev->diag_counters[i].descs);
}
}
@@@ -2273,7 -2275,7 +2273,7 @@@ static void mlx4_ib_update_qps(struct m
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
diff --combined drivers/infiniband/hw/mlx4/qp.c
index 3a1a4ac9dd33,aea4182f33a4..b17d6ebc5b70
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@@ -1099,10 -1099,8 +1099,10 @@@ static int create_qp_common(struct ib_p
if (dev->steering_support ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
qp->flags |= MLX4_IB_QP_NETIF;
- else
+ else {
+ err = -EINVAL;
goto err;
+ }
}
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
@@@ -1855,7 -1853,7 +1855,7 @@@ static int mlx4_set_path(struct mlx4_ib
u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64(smac),
+ ether_addr_to_u64(smac),
vlan_id,
path, &mqp->pri, port);
}
diff --combined drivers/infiniband/hw/mlx5/odp.c
index 31596d8c5212,81147d774dd2..91eb615b89ee
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@@ -430,7 -430,7 +430,7 @@@ static struct mlx5_ib_mr *implicit_get_
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
mr->parent = imr;
odp->private = mr;
@@@ -500,7 -500,7 +500,7 @@@ struct mlx5_ib_mr *mlx5_ib_alloc_implic
}
imr->ibmr.pd = &pd->ibpd;
- imr->mmkey.iova = 0;
+ imr->ibmr.iova = 0;
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
imr->ibmr.rkey = imr->mmkey.key;
@@@ -738,7 -738,7 +738,7 @@@ static int pagefault_mr(struct mlx5_ib_
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova))
+ if (unlikely(io_virt < mr->ibmr.iova))
return -EFAULT;
if (mr->umem->is_dmabuf)
@@@ -747,7 -747,7 +747,7 @@@
if (!odp->is_implicit_odp) {
u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova,
+ if (check_add_overflow(io_virt - mr->ibmr.iova,
(u64)odp->umem.address, &user_va))
return -EFAULT;
if (unlikely(user_va >= ib_umem_end(odp) ||
@@@ -788,7 -788,7 +788,7 @@@ struct pf_frame
int depth;
};
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
@@@ -797,6 -797,21 +797,6 @@@
return mmkey->key == key;
}
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
-{
- struct mlx5_ib_mw *mw;
- struct mlx5_ib_devx_mr *devx_mr;
-
- if (mmkey->type == MLX5_MKEY_MW) {
- mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
- return mw->ndescs;
- }
-
- devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
- mmkey);
- return devx_mr->ndescs;
-}
-
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@@ -816,11 -831,12 +816,11 @@@ static int pagefault_single_data_segmen
{
int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
- struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
struct mlx5_klm *pklm;
u32 *out = NULL;
size_t offset;
- int ndescs;
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
@@@ -869,6 -885,8 +869,6 @@@ next_mr
case MLX5_MKEY_MW:
case MLX5_MKEY_INDIRECT_DEVX:
- ndescs = get_indirect_num_descs(mmkey);
-
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
@@@ -876,7 -894,7 +876,7 @@@
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
- sizeof(*pklm) * (ndescs - 2);
+ sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) {
kfree(out);
@@@ -891,14 -909,14 +891,14 @@@
pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
if (ret)
goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
if (offset >= be32_to_cpu(pklm->bcount)) {
offset -= be32_to_cpu(pklm->bcount);
continue;
@@@ -1541,6 -1559,7 +1541,7 @@@ int mlx5r_odp_create_eq(struct mlx5_ib_
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
@@@ -1685,31 -1704,25 +1686,31 @@@ get_prefetchable_mr(struct ib_pd *pd, e
u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
- if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
if (mr->ibmr.pd != pd) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
!mr->umem->writable) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
@@@ -1741,7 -1754,7 +1742,7 @@@ static void mlx5_ib_prefetch_mr_work(st
destroy_prefetch_work(work);
}
-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct prefetch_mr_work *work,
struct ib_sge *sg_list, u32 num_sge)
@@@ -1752,19 -1765,17 +1753,19 @@@
work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- work->frags[i].io_virt = sg_list[i].addr;
- work->frags[i].length = sg_list[i].length;
- work->frags[i].mr =
- get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!work->frags[i].mr) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
work->num_sge = i;
- return false;
+ return PTR_ERR(mr);
}
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
}
work->num_sge = num_sge;
- return true;
+ return 0;
}
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@@ -1780,8 -1791,8 +1781,8 @@@
struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr)
- return -ENOENT;
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
if (ret < 0) {
@@@ -1801,7 -1812,6 +1802,7 @@@ int mlx5_ib_advise_mr_prefetch(struct i
{
u32 pf_flags = 0;
struct prefetch_mr_work *work;
+ int rc;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@@ -1817,10 -1827,9 +1818,10 @@@
if (!work)
return -ENOMEM;
- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
destroy_prefetch_work(work);
- return -EINVAL;
+ return rc;
}
queue_work(system_unbound_wq, &work->work);
return 0;
diff --combined drivers/infiniband/hw/qedr/main.c
index 8b5291d424d2,dc203f3d0f25..65ce6d0f1885
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@@ -228,6 -228,7 +228,6 @@@ static const struct ib_device_ops qedr_
.query_srq = qedr_query_srq,
.reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq,
- .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
@@@ -271,7 -272,7 +271,7 @@@ static int qedr_register_device(struct
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --combined drivers/net/ethernet/chelsio/cxgb3/common.h
index d115ec93296d,a309016f7f8c..ecd025dda8d6
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@@ -676,6 -676,8 +676,6 @@@ void t3_link_changed(struct adapter *ad
void t3_link_fault(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
-int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
-int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter);
@@@ -708,7 -710,7 +708,7 @@@ int t3_mac_enable(struct cmac *mac, in
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
- int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+ int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
diff --combined drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index e185f5f24849,9cf9e33664e4..bfffcaeee624
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@@ -2036,16 -2036,20 +2036,16 @@@ static int get_eeprom(struct net_devic
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- int i, err = 0;
-
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ int cnt;
e->magic = EEPROM_MAGIC;
- for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
- err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
+ cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
+ if (cnt < 0)
+ return cnt;
- if (!err)
- memcpy(data, buf + e->offset, e->len);
- kfree(buf);
- return err;
+ e->len = cnt;
+
+ return 0;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@@ -2054,6 -2058,7 +2054,6 @@@
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
u32 aligned_offset, aligned_len;
- __le32 *p;
u8 *buf;
int err;
@@@ -2067,9 -2072,12 +2067,9 @@@
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
- if (!err && aligned_len > 4)
- err = t3_seeprom_read(adapter,
- aligned_offset + aligned_len - 4,
- (__le32 *) & buf[aligned_len - 4]);
- if (err)
+ err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
+ buf);
+ if (err < 0)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
} else
@@@ -2079,13 -2087,17 +2079,13 @@@
if (err)
goto out;
- for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
- err = t3_seeprom_write(adapter, aligned_offset, *p);
- aligned_offset += 4;
- }
-
- if (!err)
+ err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
+ if (err >= 0)
err = t3_seeprom_wp(adapter, 1);
out:
if (buf != data)
kfree(buf);
- return err;
+ return err < 0 ? err : 0;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@@ -2574,7 -2586,7 +2574,7 @@@ static int cxgb_set_mac_addr(struct net
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
diff --combined drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index cb85c2f21525,53feac8da503..da41eee2f25c
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@@ -29,6 -29,7 +29,7 @@@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+ #include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@@ -595,9 -596,80 +596,9 @@@ struct t3_vpd
u32 pad; /* for multiple-of-4 sizing and alignment */
};
-#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00
-/**
- * t3_seeprom_read - read a VPD EEPROM location
- * @adapter: adapter to read
- * @addr: EEPROM address
- * @data: where to store the read data
- *
- * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
- * VPD ROM capability. A zero is written to the flag bit when the
- * address is written to the control register. The hardware device will
- * set the flag to 1 when 4 bytes have been read into the data register.
- */
-int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
-{
- u16 val;
- int attempts = EEPROM_MAX_POLL;
- u32 v;
- unsigned int base = adapter->params.pci.vpd_cap_addr;
-
- if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
- return -EINVAL;
-
- pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
- do {
- udelay(10);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
- } while (!(val & PCI_VPD_ADDR_F) && --attempts);
-
- if (!(val & PCI_VPD_ADDR_F)) {
- CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
- return -EIO;
- }
- pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
- *data = cpu_to_le32(v);
- return 0;
-}
-
-/**
- * t3_seeprom_write - write a VPD EEPROM location
- * @adapter: adapter to write
- * @addr: EEPROM address
- * @data: value to write
- *
- * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
- * VPD ROM capability.
- */
-int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
-{
- u16 val;
- int attempts = EEPROM_MAX_POLL;
- unsigned int base = adapter->params.pci.vpd_cap_addr;
-
- if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
- return -EINVAL;
-
- pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
- le32_to_cpu(data));
- pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
- addr | PCI_VPD_ADDR_F);
- do {
- msleep(1);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
- } while ((val & PCI_VPD_ADDR_F) && --attempts);
-
- if (val & PCI_VPD_ADDR_F) {
- CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
- return -EIO;
- }
- return 0;
-}
-
/**
* t3_seeprom_wp - enable/disable EEPROM write protection
* @adapter: the adapter
@@@ -607,14 -679,7 +608,14 @@@
*/
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
- return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+ u32 data = enable ? 0xc : 0;
+ int ret;
+
+ /* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */
+ ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
+ &data);
+
+ return ret < 0 ? ret : 0;
}
static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
@@@ -644,22 -709,24 +645,22 @@@ static int vpdstrtou16(char *s, u8 len
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, addr, ret;
struct t3_vpd vpd;
+ u8 base_val = 0;
+ int addr, ret;
/*
* Card information is normally at VPD_BASE but some early cards had
* it at 0.
*/
- ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
- if (ret)
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
+ if (ret < 0)
return ret;
- addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
- for (i = 0; i < sizeof(vpd); i += 4) {
- ret = t3_seeprom_read(adapter, addr + i,
- (__le32 *)((u8 *)&vpd + i));
- if (ret)
- return ret;
- }
+ ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
+ if (ret < 0)
+ return ret;
ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
if (ret)
@@@ -3692,8 -3759,7 +3693,7 @@@ int t3_prep_adapter(struct adapter *ada
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i], hw_addr);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
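The cxgb3 hunks above swap the driver's hand-rolled VPD access (polling PCI_VPD_ADDR for the PCI_VPD_ADDR_F flag with udelay()/msleep()) for the PCI core's VPD helpers, which perform the same handshake internally and return a byte count or a negative errno; pci_write_vpd_any() is used for EEPROM_STAT_ADDR because that offset lies outside the VPD area proper. A minimal sketch of the resulting read path; the read_vpd_word() wrapper is hypothetical, only pci_read_vpd() comes from the hunks:

	/* Sketch: one 32-bit VPD read via the PCI core (linux/pci.h).
	 * pci_read_vpd() returns the number of bytes read on success or
	 * a negative errno, so the result is normalized the same way
	 * the new t3_seeprom_wp() does.
	 */
	static int read_vpd_word(struct pci_dev *pdev, loff_t addr, u32 *val)
	{
		ssize_t ret = pci_read_vpd(pdev, addr, sizeof(*val), val);

		return ret < 0 ? ret : 0;
	}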
diff --combined drivers/net/ethernet/hisilicon/hns3/hnae3.h
index da3a593f6a56,3f7a9a4c59d5..9ae1aa96c545
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@@ -95,6 -95,7 +95,7 @@@ enum HNAE3_DEV_CAP_BITS
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@@ -151,6 -152,9 +152,9 @@@
#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+ #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@@ -294,6 -298,8 +298,8 @@@ enum hnae3_dbg_cmd
HNAE3_DBG_CMD_MAC_TNL_STATUS,
HNAE3_DBG_CMD_SERV_INFO,
HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ HNAE3_DBG_CMD_COAL_INFO,
HNAE3_DBG_CMD_UNKNOWN,
};
@@@ -341,6 -347,9 +347,9 @@@ struct hnae3_dev_specs
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
u16 max_frm_size;
u16 max_qset_num;
+ u16 umv_size;
+ u16 mc_mac_size;
+ u32 mac_stats_num;
};
struct hnae3_client_ops {
@@@ -568,7 -577,6 +577,7 @@@ struct hnae3_ae_ops
u32 *auto_neg, u32 *rx_en, u32 *tx_en);
int (*set_pauseparam)(struct hnae3_handle *handle,
u32 auto_neg, u32 rx_en, u32 tx_en);
+ int (*restore_pauseparam)(struct hnae3_handle *handle);
int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
int (*get_autoneg)(struct hnae3_handle *handle);
@@@ -589,7 -597,7 +598,7 @@@
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
- int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
+ int (*set_mac_addr)(struct hnae3_handle *handle, const void *p,
bool is_first);
int (*do_ioctl)(struct hnae3_handle *handle,
struct ifreq *ifr, int cmd);
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index e54f96251fea,1a1bebd453d3..67364ab63a1f
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@@ -137,7 -137,7 +137,7 @@@ static struct hns3_dbg_cmd_info hns3_db
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@@ -256,7 -256,7 +256,7 @@@
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@@ -298,7 -298,7 +298,7 @@@
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@@@ -336,6 -336,20 +336,20 @@@
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "coalesce_info",
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@@ -384,6 -398,26 +398,26 @@@
}
};
+ static const struct hns3_dbg_item coal_info_items[] = {
+ { "VEC_ID", 2 },
+ { "ALGO_STATE", 2 },
+ { "PROFILE_ID", 2 },
+ { "CQE_MODE", 2 },
+ { "TUNE_STATE", 2 },
+ { "STEPS_LEFT", 2 },
+ { "STEPS_RIGHT", 2 },
+ { "TIRED", 2 },
+ { "SW_GL", 2 },
+ { "SW_QL", 2 },
+ { "HW_GL", 2 },
+ { "HW_QL", 2 },
+ };
+
+ static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
+ static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
+ static const char * const
+ dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
+
static void hns3_dbg_fill_content(char *content, u16 len,
const struct hns3_dbg_item *items,
const char **result, u16 size)
@@@ -405,6 -439,94 +439,94 @@@
*pos++ = '\0';
}
+ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ char **result, int i, bool is_tx)
+ {
+ unsigned int gl_offset, ql_offset;
+ struct hns3_enet_coalesce *coal;
+ unsigned int reg_val;
+ unsigned int j = 0;
+ struct dim *dim;
+ bool ql_enable;
+
+ if (is_tx) {
+ coal = &tqp_vector->tx_group.coal;
+ dim = &tqp_vector->tx_group.dim;
+ gl_offset = HNS3_VECTOR_GL1_OFFSET;
+ ql_offset = HNS3_VECTOR_TX_QL_OFFSET;
+ ql_enable = tqp_vector->tx_group.coal.ql_enable;
+ } else {
+ coal = &tqp_vector->rx_group.coal;
+ dim = &tqp_vector->rx_group.dim;
+ gl_offset = HNS3_VECTOR_GL0_OFFSET;
+ ql_offset = HNS3_VECTOR_RX_QL_OFFSET;
+ ql_enable = tqp_vector->rx_group.coal.ql_enable;
+ }
+
+ sprintf(result[j++], "%d", i);
+ sprintf(result[j++], "%s", dim_state_str[dim->state]);
+ sprintf(result[j++], "%u", dim->profile_ix);
+ sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+ sprintf(result[j++], "%s",
+ dim_tune_stat_str[dim->tune_state]);
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
+ sprintf(result[j++], "%u", coal->int_gl);
+ sprintf(result[j++], "%u", coal->int_ql);
+ reg_val = readl(tqp_vector->mask_addr + gl_offset) &
+ HNS3_VECTOR_GL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ if (ql_enable) {
+ reg_val = readl(tqp_vector->mask_addr + ql_offset) &
+ HNS3_VECTOR_QL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ } else {
+ sprintf(result[j++], "NA");
+ }
+ }
+
+ static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
+ int *pos, bool is_tx)
+ {
+ char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(coal_info_items)];
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%s interrupt coalesce info:\n",
+ is_tx ? "tx" : "rx");
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ NULL, ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_get_coal_info(tqp_vector, result, i, is_tx);
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ (const char **)result,
+ ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+ }
+
+ static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+ {
+ int pos = 0;
+
+ hns3_dump_coal_info(h, buf, len, &pos, true);
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ hns3_dump_coal_info(h, buf, len, &pos, false);
+
+ return 0;
+ }
+
static const struct hns3_dbg_item tx_spare_info_items[] = {
{ "QUEUE_ID", 2 },
{ "COPYBREAK", 2 },
@@@ -462,7 -584,7 +584,7 @@@ static const struct hns3_dbg_item rx_qu
{ "TAIL", 2 },
{ "HEAD", 2 },
{ "FBDNUM", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
@@@ -565,7 -687,7 +687,7 @@@ static const struct hns3_dbg_item tx_qu
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "OFFSET", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "RING_EN", 2 },
{ "TX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@@@ -790,13 -912,13 +912,13 @@@ static int hns3_dbg_rx_bd_info(struct h
}
static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 5 },
- { "ADDRESS", 2 },
+ { "BD_IDX", 2 },
+ { "ADDRESS", 13 },
{ "VLAN_TAG", 2 },
{ "SIZE", 2 },
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
- { "TV", 2 },
+ { "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },
@@@ -924,6 -1046,12 +1046,12 @@@ hns3_dbg_dev_specs(struct hnae3_handle
dev_specs->max_tm_rate);
*pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
dev_specs->max_qset_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+ dev_specs->umv_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+ dev_specs->mc_mac_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
+ dev_specs->mac_stats_num);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@@ -937,6 -1065,69 +1065,69 @@@
return 0;
}
+ static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+ };
+
+ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+ {
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+ }
+
+ static int
+ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+ {
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+ }
+
static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
{
u32 i;
@@@ -978,6 -1169,14 +1169,14 @@@ static const struct hns3_dbg_func hns3_
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dbg_dump = hns3_dbg_tx_queue_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dbg_dump = hns3_dbg_coal_info,
+ },
};
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
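All the new debugfs dumps above share one table convention: a command declares an array of struct hns3_dbg_item entries pairing a column header with an extra-space interval, and hns3_dbg_fill_content() pads every cell to the header width plus that interval, so the header row (emitted by passing a NULL result array) and the data rows line up. A rough restatement of the padding idea, assuming the item layout visible in the arrays above (fill_row() is hypothetical, not the driver's helper):

	struct dbg_item {
		char name[24];	/* column header */
		u16 interval;	/* extra spaces appended after the cell */
	};

	/* Pad each cell, or the header itself when result is NULL, to
	 * strlen(name) + interval columns, left-justified.
	 */
	static void fill_row(char *buf, size_t len,
			     const struct dbg_item *items,
			     const char **result, int size)
	{
		char *pos = buf, *end = buf + len;
		int i;

		for (i = 0; i < size && pos < end; i++)
			pos += scnprintf(pos, end - pos, "%-*s",
					 (int)strlen(items[i].name) +
					 items[i].interval,
					 result ? result[i] : items[i].name);
		scnprintf(pos, end - pos, "\n");
	}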
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9cda8b3562b8,f0aa4fbd2200..4e0a8c2f7c05
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@@ -391,7 -391,7 +391,7 @@@ static int hclge_dbg_dump_mac(struct hc
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u16 qset_id, qset_num;
int ret;
@@@ -408,12 -408,12 +408,12 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%04u %#x %#x %#x %#x\n",
- qset_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2, bitmap->bit3);
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
}
return 0;
@@@ -422,7 -422,7 +422,7 @@@
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pri_id, pri_num;
int ret;
@@@ -439,11 -439,12 +439,11 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pri_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@@ -452,7 -453,7 +452,7 @@@
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
@@@ -465,11 -466,12 +465,11 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pg_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@@ -509,7 -511,7 +509,7 @@@ static int hclge_dbg_dump_dcb_queue(str
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 port_id = 0;
int ret;
@@@ -519,12 -521,12 +519,12 @@@
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- bitmap->bit0);
+ req.bit0);
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- bitmap->bit1);
+ req.bit1);
return 0;
}
@@@ -1990,6 -1992,9 +1990,9 @@@ static int hclge_dbg_dump_umv_info(stru
}
mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
return 0;
}
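The hclge_debugfs.c hunks above change how the DCB dumps pull flag bits out of a firmware descriptor: rather than casting &desc.data[1], a little-endian __le32, to a struct hclge_dbg_bitmap_cmd pointer and reading bit fields straight from the DMA buffer, the low byte is first converted with le32_to_cpu() into an on-stack copy, so the bit fields are decoded from CPU-order data. A sketch with the union layout assumed from the .bitmap/.bit0-.bit3 usage in the hunks (dcb_flags_from_desc() is a hypothetical wrapper):

	/* Assumed layout: one byte carrying four individually printed flags. */
	struct dbg_bitmap_cmd {
		union {
			u8 bitmap;
			struct {
				u8 bit0 : 1;
				u8 bit1 : 1;
				u8 bit2 : 1;
				u8 bit3 : 1;
				u8 rsvd : 4;
			};
		};
	};

	static struct dbg_bitmap_cmd dcb_flags_from_desc(const struct hclge_desc *desc)
	{
		struct dbg_bitmap_cmd req;

		/* Convert to CPU order first, then let the compiler decode
		 * the flags from the local copy, not from the __le32 in place.
		 */
		req.bitmap = (u8)le32_to_cpu(desc->data[1]);
		return req;	/* req.bit0 .. req.bit3 are now valid */
	}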
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 269e579762b2,f1db6699f81f..22d08ed092cb
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@@ -156,174 -156,210 +156,210 @@@ static const char hns3_nic_test_strs[][
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
- {"mac_tx_mac_pause_num",
+ {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
- {"mac_rx_mac_pause_num",
+ {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
- {"mac_tx_control_pkt_num",
+ {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
+ {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
+ {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
- {"mac_rx_control_pkt_num",
+ {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
- {"mac_tx_pfc_pkt_num",
+ {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
- {"mac_tx_pfc_pri0_pkt_num",
+ {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
- {"mac_tx_pfc_pri1_pkt_num",
+ {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
- {"mac_tx_pfc_pri2_pkt_num",
+ {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
- {"mac_tx_pfc_pri3_pkt_num",
+ {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
- {"mac_tx_pfc_pri4_pkt_num",
+ {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
- {"mac_tx_pfc_pri5_pkt_num",
+ {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
- {"mac_tx_pfc_pri6_pkt_num",
+ {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
- {"mac_tx_pfc_pri7_pkt_num",
+ {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
- {"mac_rx_pfc_pkt_num",
+ {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
+ {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
+ {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
+ {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
+ {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
+ {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
+ {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
+ {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
+ {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
- {"mac_rx_pfc_pri0_pkt_num",
+ {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
- {"mac_rx_pfc_pri1_pkt_num",
+ {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
- {"mac_rx_pfc_pri2_pkt_num",
+ {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
- {"mac_rx_pfc_pri3_pkt_num",
+ {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
- {"mac_rx_pfc_pri4_pkt_num",
+ {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
- {"mac_rx_pfc_pri5_pkt_num",
+ {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
- {"mac_rx_pfc_pri6_pkt_num",
+ {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
- {"mac_rx_pfc_pri7_pkt_num",
+ {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
- {"mac_tx_total_pkt_num",
+ {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
+ {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
+ {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
+ {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
+ {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
+ {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
+ {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
+ {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
+ {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
- {"mac_tx_total_oct_num",
+ {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
- {"mac_tx_good_pkt_num",
+ {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
- {"mac_tx_bad_pkt_num",
+ {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
- {"mac_tx_good_oct_num",
+ {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
- {"mac_tx_bad_oct_num",
+ {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
- {"mac_tx_uni_pkt_num",
+ {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
- {"mac_tx_multi_pkt_num",
+ {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
- {"mac_tx_broad_pkt_num",
+ {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
- {"mac_tx_undersize_pkt_num",
+ {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_oversize_pkt_num",
+ {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
- {"mac_tx_64_oct_pkt_num",
+ {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
- {"mac_tx_65_127_oct_pkt_num",
+ {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
- {"mac_tx_128_255_oct_pkt_num",
+ {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
- {"mac_tx_256_511_oct_pkt_num",
+ {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
- {"mac_tx_512_1023_oct_pkt_num",
+ {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
- {"mac_tx_1024_1518_oct_pkt_num",
+ {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_2047_oct_pkt_num",
+ {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
- {"mac_tx_2048_4095_oct_pkt_num",
+ {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
- {"mac_tx_4096_8191_oct_pkt_num",
+ {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_9216_oct_pkt_num",
+ {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
- {"mac_tx_9217_12287_oct_pkt_num",
+ {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
- {"mac_tx_12288_16383_oct_pkt_num",
+ {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
- {"mac_tx_1519_max_good_pkt_num",
+ {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
- {"mac_tx_1519_max_bad_pkt_num",
+ {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
- {"mac_rx_total_pkt_num",
+ {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
- {"mac_rx_total_oct_num",
+ {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
- {"mac_rx_good_pkt_num",
+ {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
- {"mac_rx_bad_pkt_num",
+ {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
- {"mac_rx_good_oct_num",
+ {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
- {"mac_rx_bad_oct_num",
+ {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
- {"mac_rx_uni_pkt_num",
+ {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
- {"mac_rx_multi_pkt_num",
+ {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
- {"mac_rx_broad_pkt_num",
+ {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
- {"mac_rx_undersize_pkt_num",
+ {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_oversize_pkt_num",
+ {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
- {"mac_rx_64_oct_pkt_num",
+ {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
- {"mac_rx_65_127_oct_pkt_num",
+ {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
- {"mac_rx_128_255_oct_pkt_num",
+ {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
- {"mac_rx_256_511_oct_pkt_num",
+ {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
- {"mac_rx_512_1023_oct_pkt_num",
+ {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
- {"mac_rx_1024_1518_oct_pkt_num",
+ {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_2047_oct_pkt_num",
+ {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
- {"mac_rx_2048_4095_oct_pkt_num",
+ {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
- {"mac_rx_4096_8191_oct_pkt_num",
+ {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_9216_oct_pkt_num",
+ {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
- {"mac_rx_9217_12287_oct_pkt_num",
+ {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
- {"mac_rx_12288_16383_oct_pkt_num",
+ {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
- {"mac_rx_1519_max_good_pkt_num",
+ {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
- {"mac_rx_1519_max_bad_pkt_num",
+ {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num",
+ {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
- {"mac_tx_undermin_pkt_num",
+ {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
- {"mac_tx_jabber_pkt_num",
+ {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
- {"mac_tx_err_all_pkt_num",
+ {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
- {"mac_tx_from_app_good_pkt_num",
+ {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
- {"mac_tx_from_app_bad_pkt_num",
+ {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
- {"mac_rx_fragment_pkt_num",
+ {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
- {"mac_rx_undermin_pkt_num",
+ {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
- {"mac_rx_jabber_pkt_num",
+ {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
- {"mac_rx_fcs_err_pkt_num",
+ {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
- {"mac_rx_send_app_good_pkt_num",
+ {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
- {"mac_rx_send_app_bad_pkt_num",
+ {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
@@@ -451,8 -487,9 +487,9 @@@ static int hclge_mac_update_stats_defec
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
- int i, k, n;
+ u32 data_size;
int ret;
+ u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
@@@ -463,33 -500,37 +500,37 @@@
return ret;
}
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
- /* for special opcode 0032, only the first desc has the head */
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ /* The first desc has a 64-bit header, so subtract 1 from the data size */
+ data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is contiguous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
return 0;
}
- static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
+ #define HCLGE_REG_NUM_PER_DESC 4
+
+ u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
- u16 i, k, n;
+ u32 data_size;
+ u32 desc_num;
int ret;
+ u32 i;
+
+ /* The first desc has a 64-bit header, so account for it in desc_num */
+ desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
* so GFP_ATOMIC is more suitable here
@@@ -505,21 -546,16 +546,16 @@@
return ret;
}
- for (i = 0; i < desc_num; i++) {
- /* for special opcode 0034, only the first desc has the head */
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is contiguous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
kfree(desc);
@@@ -527,42 -563,37 +563,37 @@@
return 0;
}
- static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+ static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
struct hclge_desc desc;
- __le32 *desc_data;
- u32 reg_num;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query mac statistic reg number, ret = %d\n",
+ ret);
return ret;
+ }
- desc_data = (__le32 *)(&desc.data[0]);
- reg_num = le32_to_cpu(*desc_data);
-
- *desc_num = 1 + ((reg_num - 3) >> 2) +
- (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+ *reg_num = le32_to_cpu(desc.data[0]);
+ if (*reg_num == 0) {
+ dev_err(&hdev->pdev->dev,
+ "mac statistic reg number is invalid!\n");
+ return -ENODATA;
+ }
return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
- u32 desc_num;
- int ret;
-
- ret = hclge_mac_query_reg_num(hdev, &desc_num);
/* The firmware supports the new statistics acquisition method */
- if (!ret)
- ret = hclge_mac_update_stats_complete(hdev, desc_num);
- else if (ret == -EOPNOTSUPP)
- ret = hclge_mac_update_stats_defective(hdev);
+ if (hdev->ae_dev->dev_specs.mac_stats_num)
+ return hclge_mac_update_stats_complete(hdev);
else
- dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
-
- return ret;
+ return hclge_mac_update_stats_defective(hdev);
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
@@@ -670,20 -701,39 +701,39 @@@ static u8 *hclge_tqps_get_strings(struc
return buff;
}
- static u64 *hclge_comm_get_stats(const void *comm_stats,
+ static int hclge_comm_get_count(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ u32 size)
+ {
+ int count = 0;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
+ count++;
+
+ return count;
+ }
+
+ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
u64 *buf = data;
u32 i;
- for (i = 0; i < size; i++)
- buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
+ buf++;
+ }
- return buf + size;
+ return buf;
}
- static u8 *hclge_comm_get_strings(u32 stringset,
+ static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 *data)
{
@@@ -694,6 -744,9 +744,9 @@@
return buff;
for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
@@@ -785,7 -838,8 +838,8 @@@ static int hclge_get_sset_count(struct
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
} else if (stringset == ETH_SS_STATS) {
- count = ARRAY_SIZE(g_mac_stats_string) +
+ count = hclge_comm_get_count(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string)) +
hclge_tqps_get_sset_count(handle, stringset);
}
@@@ -795,12 -849,14 +849,14 @@@
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
u8 *p = (char *)data;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
@@@ -834,7 -890,7 +890,7 @@@ static void hclge_get_stats(struct hnae
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@@ -1037,96 -1093,100 +1093,100 @@@ static int hclge_check_port_speed(struc
return -EINVAL;
}
- static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
+ static void hclge_convert_setting_sr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
}
- static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
+ static void hclge_convert_setting_lr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- mac->supported);
+ link_mode);
}
- static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
+ static void hclge_convert_setting_cr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
}
- static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
+ static void hclge_convert_setting_kr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@@ -1170,9 -1230,9 +1230,9 @@@ static void hclge_parse_fiber_link_mode
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
mac->supported);
- hclge_convert_setting_sr(mac, speed_ability);
- hclge_convert_setting_lr(mac, speed_ability);
- hclge_convert_setting_cr(mac, speed_ability);
+ hclge_convert_setting_sr(speed_ability, mac->supported);
+ hclge_convert_setting_lr(speed_ability, mac->supported);
+ hclge_convert_setting_cr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@@ -1188,7 -1248,7 +1248,7 @@@ static void hclge_parse_backplane_link_
{
struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability);
+ hclge_convert_setting_kr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@@ -1342,8 -1402,6 +1402,6 @@@ static void hclge_parse_cfg(struct hclg
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
- if (!cfg->umv_space)
- cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_PF_RSS_SIZE_M,
@@@ -1419,6 -1477,7 +1477,7 @@@ static void hclge_set_default_dev_specs
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@@ -1440,6 -1499,8 +1499,8 @@@
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@@ -1460,6 -1521,21 +1521,21 @@@
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+ }
+
+ static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
+ {
+ u32 reg_num = 0;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &reg_num);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
+ return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@@ -1468,6 -1544,10 +1544,10 @@@
int ret;
int i;
+ ret = hclge_query_mac_stats_num(hdev);
+ if (ret)
+ return ret;
+
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
@@@ -1549,7 -1629,10 +1629,10 @@@ static int hclge_configure(struct hclge
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
- hdev->wanted_umv_size = cfg.umv_space;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
hdev->gro_en = true;
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
@@@ -2847,29 -2930,33 +2930,29 @@@ static void hclge_mbx_task_schedule(str
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task,
- delay_time);
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@@@ -2964,6 -3051,82 +3047,82 @@@ static void hclge_update_link_status(st
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
+ static void hclge_update_speed_advertising(struct hclge_mac *mac)
+ {
+ u32 speed_ability;
+
+ if (hclge_get_speed_bit(mac->speed, &speed_ability))
+ return;
+
+ switch (mac->module_type) {
+ case HNAE3_MODULE_TYPE_FIBRE_LR:
+ hclge_convert_setting_lr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_FIBRE_SR:
+ case HNAE3_MODULE_TYPE_AOC:
+ hclge_convert_setting_sr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_CR:
+ hclge_convert_setting_cr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_KR:
+ hclge_convert_setting_kr(speed_ability, mac->advertising);
+ break;
+ default:
+ break;
+ }
+ }
+
+ static void hclge_update_fec_advertising(struct hclge_mac *mac)
+ {
+ if (mac->fec_mode & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->advertising);
+ else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->advertising);
+ }
+
+ static void hclge_update_pause_advertising(struct hclge_dev *hdev)
+ {
+ struct hclge_mac *mac = &hdev->hw.mac;
+ bool rx_en, tx_en;
+
+ switch (hdev->fc_mode_last_time) {
+ case HCLGE_FC_RX_PAUSE:
+ rx_en = true;
+ tx_en = false;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ rx_en = false;
+ tx_en = true;
+ break;
+ case HCLGE_FC_FULL:
+ rx_en = true;
+ tx_en = true;
+ break;
+ default:
+ rx_en = false;
+ tx_en = false;
+ break;
+ }
+
+ linkmode_set_pause(mac->advertising, tx_en, rx_en);
+ }
+
+ static void hclge_update_advertising(struct hclge_dev *hdev)
+ {
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ linkmode_zero(mac->advertising);
+ hclge_update_speed_advertising(mac);
+ hclge_update_fec_advertising(mac);
+ hclge_update_pause_advertising(hdev);
+ }
+
static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
@@@ -2986,7 -3149,7 +3145,7 @@@
} else {
linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
mac->supported);
- linkmode_zero(mac->advertising);
+ hclge_update_advertising(hdev);
}
}
@@@ -3487,14 -3650,33 +3646,14 @@@ static void hclge_get_misc_vector(struc
hdev->num_msi_used += 1;
}
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
- affinity_notify);
-
- cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask);
-
- hdev->affinity_notify.notify = hclge_irq_affinity_notify;
- hdev->affinity_notify.release = hclge_irq_affinity_release;
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
- &hdev->affinity_notify);
}
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}
@@@ -8475,6 -8657,9 +8634,9 @@@ static int hclge_init_umv_space(struct
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
return 0;
}
@@@ -8492,6 -8677,8 +8654,8 @@@ static void hclge_reset_umv_space(struc
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
@@@ -8746,6 -8933,7 +8910,7 @@@ int hclge_add_mc_addr_common(struct hcl
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
+ bool is_new_addr = false;
int status;
/* mac addr check */
@@@ -8759,6 -8947,13 +8924,13 @@@
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
/* This mac addr does not exist, add a new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
@@@ -8768,12 -8963,18 +8940,18 @@@
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- /* if already overflow, not to print each time */
- if (status == -ENOSPC &&
- !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
- dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ if (status == -ENOSPC)
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
return status;
+
+ err_no_space:
+ /* if the table already overflowed, do not print the error each time */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@@ -8810,12 -9011,15 +8988,15 @@@ int hclge_rm_mc_addr_common(struct hclg
if (status)
return status;
- if (hclge_is_all_function_id_zero(desc))
+ if (hclge_is_all_function_id_zero(desc)) {
/* All the vfid is zero, so need to delete this entry */
status = hclge_remove_mac_vlan_tbl(vport, &req);
- else
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ }
} else if (status == -ENOENT) {
status = 0;
}
@@@ -9391,7 -9595,7 +9572,7 @@@ int hclge_update_mac_node_for_dev_addr(
return 0;
}
- static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
@@@ -10998,35 -11202,6 +11179,35 @@@ static int hclge_set_pauseparam(struct
return -EOPNOTSUPP;
}
+static int hclge_restore_pauseparam(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 auto_neg, rx_pause, tx_pause;
+ int ret;
+
+ hclge_get_pauseparam(handle, &auto_neg, &rx_pause, &tx_pause);
+ /* when autoneg is disabled, the phy's pause setting takes effect
+ * only after a link down/up, so suspend and resume the phy to
+ * apply it.
+ */
+ ret = phy_suspend(hdev->hw.mac.phydev);
+ if (ret)
+ return ret;
+
+ phy_set_asym_pause(hdev->hw.mac.phydev, rx_pause, tx_pause);
+
+ ret = phy_resume(hdev->hw.mac.phydev);
+ if (ret)
+ return ret;
+
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "restore pauseparam error, ret = %d.\n", ret);
+
+ return ret;
+}
+
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex)
{
@@@ -12990,7 -13165,6 +13171,7 @@@ static const struct hnae3_ae_ops hclge_
.halt_autoneg = hclge_halt_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
+ .restore_pauseparam = hclge_restore_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
@@@ -13059,7 -13233,7 +13240,7 @@@ static int hclge_init(void
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
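Two scheduling changes run through the hclge_main.c hunks: the service task is no longer pinned via mod_delayed_work_on() to the first CPU of the misc-interrupt affinity mask (the irq affinity notifier plumbing that tracked the mask is dropped with it), and the workqueue is created WQ_UNBOUND so the scheduler places each work item freely. A minimal sketch of the resulting setup, using only names from the hunks:

	/* WQ_UNBOUND: work items are not tied to the submitting CPU;
	 * max_active 0 selects the default concurrency limit.
	 */
	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq)
		return -ENOMEM;

	/* Plain mod_delayed_work() replaces mod_delayed_work_on(): no CPU
	 * is named, so the unbound pool picks one.
	 */
	mod_delayed_work(hclge_wq, &hdev->service_task, 0);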
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 69cd8f87b4c8,4f8403af84be..9e1eede599ec
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@@ -403,8 -403,13 +403,13 @@@ struct hclge_tm_info
u8 pfc_en; /* PFC enabled or not for user priority */
};
+ /* max number of mac statistics on each version */
+ #define HCLGE_MAC_STATS_MAX_NUM_V1 84
+ #define HCLGE_MAC_STATS_MAX_NUM_V2 105
+
struct hclge_comm_stats_str {
char desc[ETH_GSTRING_LEN];
+ u32 stats_num;
unsigned long offset;
};
@@@ -412,6 -417,7 +417,7 @@@
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
u64 mac_rx_mac_pause_num;
+ u64 rsv0;
u64 mac_tx_pfc_pri0_pkt_num;
u64 mac_tx_pfc_pri1_pkt_num;
u64 mac_tx_pfc_pri2_pkt_num;
@@@ -448,7 -454,7 +454,7 @@@
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 rsv0;
+ u64 rsv1;
u64 mac_tx_8192_9216_oct_pkt_num;
u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
@@@ -475,7 -481,7 +481,7 @@@
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 rsv1;
+ u64 rsv2;
u64 mac_rx_8192_9216_oct_pkt_num;
u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
@@@ -498,6 -504,28 +504,28 @@@
u64 mac_rx_pfc_pause_pkt_num;
u64 mac_tx_ctrl_pkt_num;
u64 mac_rx_ctrl_pkt_num;
+
+ /* duration of pfc */
+ u64 mac_tx_pfc_pri0_xoff_time;
+ u64 mac_tx_pfc_pri1_xoff_time;
+ u64 mac_tx_pfc_pri2_xoff_time;
+ u64 mac_tx_pfc_pri3_xoff_time;
+ u64 mac_tx_pfc_pri4_xoff_time;
+ u64 mac_tx_pfc_pri5_xoff_time;
+ u64 mac_tx_pfc_pri6_xoff_time;
+ u64 mac_tx_pfc_pri7_xoff_time;
+ u64 mac_rx_pfc_pri0_xoff_time;
+ u64 mac_rx_pfc_pri1_xoff_time;
+ u64 mac_rx_pfc_pri2_xoff_time;
+ u64 mac_rx_pfc_pri3_xoff_time;
+ u64 mac_rx_pfc_pri4_xoff_time;
+ u64 mac_rx_pfc_pri5_xoff_time;
+ u64 mac_rx_pfc_pri6_xoff_time;
+ u64 mac_rx_pfc_pri7_xoff_time;
+
+ /* duration of pause */
+ u64 mac_tx_pause_xoff_time;
+ u64 mac_rx_pause_xoff_time;
};
#define HCLGE_STATS_TIMER_INTERVAL 300UL
@@@ -938,12 -966,15 +966,14 @@@ struct hclge_dev
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
struct devlink *devlink;
};
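The header hunks above tag every g_mac_stats_string entry with the register count of the firmware generation that first provides it: HCLGE_MAC_STATS_MAX_NUM_V1 (84) for counters all firmware exposes, HCLGE_MAC_STATS_MAX_NUM_V2 (105) for the new xoff_time counters, with the rsv0-rsv2 padding keeping struct hclge_mac_stats in register order. The getters then expose an entry only when its tag does not exceed the count the firmware reported. A hedged restatement of the check (stat_supported() is hypothetical):

	static bool stat_supported(const struct hclge_comm_stats_str *strs,
				   u32 mac_stats_num)
	{
		return strs->stats_num <= mac_stats_num;
	}

	/* On V1 firmware (mac_stats_num == 84) the V2-tagged xoff_time
	 * counters fail the check and are skipped by both the string and
	 * the value getters; on V2 firmware (105) every entry passes.
	 */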
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index cf00ad7bb881,3306050ad72c..645b2c0011e6
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@@ -1349,7 -1349,7 +1349,7 @@@ static void hclgevf_get_mac_addr(struc
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
- static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@@ -2232,7 -2232,6 +2232,7 @@@ static void hclgevf_get_misc_vector(str
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
&hdev->state))
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@@ -3450,8 -3449,6 +3450,8 @@@ static int hclgevf_init_hdev(struct hcl
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@@ -3902,7 -3899,7 +3902,7 @@@ static int hclgevf_init(void
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
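Both PF and VF task-scheduling helpers lean on test_and_set_bit() as an atomic latch, and the hclgevf hunks add a second gate: HCLGEVF_STATE_SERVICE_INITED is set only at the end of hclgevf_init_hdev(), so a reset request that arrives before the service task exists is silently dropped rather than touching a half-initialized device. A sketch of the combined gating, restating the logic of the hunks (schedule_reset_once() is a hypothetical name):

	/* Only the first caller to flip the SCHED bit queues the work;
	 * concurrent callers see it already set and return, so pending
	 * requests are coalesced. The service task clears the bit when
	 * it consumes the request.
	 */
	static void schedule_reset_once(struct hclgevf_dev *hdev)
	{
		if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
		    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
		    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
				      &hdev->state))
			mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
	}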
diff --combined drivers/net/ethernet/intel/ice/ice_ptp.c
index d1ef3d48a4b0,a1be0d04a2d0..bf7247c6f58e
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@@ -6,6 -6,252 +6,252 @@@
#define E810_OUT_PROP_DELAY_NS 1
+ static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+ };
+
+ /**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+ static int
+ ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+ {
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+ }
+
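Condensed, the SMA1/U.FL1 decode implemented by the first switch above is:

	/* data & ICE_SMA1_MASK_E810T:
	 *   == ICE_SMA1_MASK_E810T   -> SMA1 and U.FL1 disabled
	 *   == ICE_SMA1_DIR_EN_E810T -> SMA1 periodic output (TX)
	 *   == ICE_SMA1_TX_EN_E810T  -> SMA1 external timestamp (RX)
	 *   == 0                     -> SMA1 RX + U.FL1 TX
	 */

The SMA2/U.FL2 decode follows the same pattern, with the extra
ICE_SMA2_UFL2_RX_DIS_E810T bit selecting which of the two connectors
receives.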
+ /**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration
+ * passed in the ptp_pins parameter
+ */
+ static int
+ ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+ {
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+ }
+
+ /**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+ static int
+ ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+ {
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+ }
+
+ /**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify that the pin supports the requested function, check pin
+ * consistency, and reconfigure the SMA logic attached to the given pin to
+ * enable its desired functionality
+ */
+ static int
+ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+ {
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+ }
+
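User space reaches these hooks through the standard PTP pin ioctls on the
clock's character device. A minimal sketch, assuming SMA1 is pin index 1 on
channel 1 (as the ice_pin_desc_e810t table suggests) and the clock is
/dev/ptp0:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	/* Ask for periodic output on SMA1. The kernel routes this through
	 * the driver's .verify hook (ice_verify_pin_e810t), which ends up in
	 * ice_ptp_set_sma_e810t() to reprogram the SMA control logic. */
	static int sma1_perout(void)
	{
		struct ptp_pin_desc desc;
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0)
			return -1;
		memset(&desc, 0, sizeof(desc));
		desc.index = 1;			/* SMA1 (assumed index) */
		desc.func = PTP_PF_PEROUT;
		desc.chan = 1;			/* channel reassignment is rejected */
		if (ioctl(fd, PTP_PIN_SETFUNC, &desc) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}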
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@@ -735,17 -981,34 +981,34 @@@ ice_ptp_gpio_enable_e810(struct ptp_clo
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -1;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@@ -757,7 -1020,19 +1020,19 @@@
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@@ -1012,7 -1287,7 +1287,7 @@@ int ice_ptp_set_ts_config(struct ice_p
* The timestamp is in ns, so we must convert the result first.
*/
void
- ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@@ -1037,14 -1312,94 +1312,94 @@@
}
}
+ /**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable OS access to the SMA pins. Called to clear out the OS indications
+ * of pin support when we fail to set up the E810-T SMA control register.
+ */
+ static void
+ ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+ }
+
+ /**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config, and setting it up
+ * according to the current status of the SMA. On failure, disable all of the
+ * extended SMA pin support.
+ */
+ static void
+ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ }
+
+ /**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+ static void
+ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
+
/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@@ -1062,7 -1417,10 +1417,10 @@@ ice_ptp_set_funcs_e810(struct ice_pf *p
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
@@@ -1571,9 -1929,6 +1929,9 @@@ err_kworker
*/
void ice_ptp_release(struct ice_pf *pf)
{
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 02db3148240a,da1bec04efff..eae9aa9c0811
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@@ -745,7 -745,7 +745,7 @@@ static int mlx5_fw_tracer_set_mtrc_conf
MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
ilog2(TRACER_BUFFER_PAGE_NUM));
- MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey);
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MTRC_CONF, 0, 1);
@@@ -1028,7 -1028,7 +1028,7 @@@ int mlx5_fw_tracer_init(struct mlx5_fw_
err_notifier_unregister:
mlx5_eq_notifier_unregister(dev, &tracer->nb);
- mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(dev, tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
@@@ -1051,7 -1051,7 +1051,7 @@@ void mlx5_fw_tracer_cleanup(struct mlx5
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
- mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
}
@@@ -1069,7 -1069,6 +1069,6 @@@ void mlx5_fw_tracer_destroy(struct mlx5
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
- flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h
index 7761daa25f63,67453dd75b93..f0ac6b0d9653
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@@ -79,6 -79,11 +79,11 @@@ struct page_pool
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+ #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
+ #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+ #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
+ #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
+ #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
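For orientation, on a 4 KiB-page system these defines give each SHAMPO
header entry 2^9 = 512 bytes, so a page holds eight of them:

	/* Back-of-envelope, PAGE_SIZE == 4096:
	 *   MLX5E_SHAMPO_WQ_HEADER_PER_PAGE = 4096 >> 9 = 8
	 */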
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@@ -152,6 -157,25 +157,25 @@@
#define MLX5E_UMR_WQEBBS \
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
+ #define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_klm) * (sgl_len)))
+
+ #define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
+ (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
+
+ #define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
+ (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
+
+ #define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
+
+ #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+
+ #define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+
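Plugging in the usual mlx5 segment sizes makes the KLM sizing macros
concrete (a sketch; sizes assumed: 16-byte ctrl seg + 48-byte UMR ctrl seg +
64-byte mkey seg = 128-byte struct mlx5e_umr_wqe, 16-byte struct mlx5_klm,
MLX5_SEND_WQE_BB = 64, MLX5_SEND_WQE_DS = 16):

	/* For an 8-entry KLM list:
	 *   MLX5E_KLM_UMR_WQE_SZ(8) = 128 + 8 * 16 = 256 bytes
	 *   MLX5E_KLM_UMR_WQEBBS(8) = DIV_ROUND_UP(256, 64) = 4 WQEBBs
	 *   MLX5E_KLM_UMR_DS_CNT(8) = DIV_ROUND_UP(256, 16) = 16 data segments
	 */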
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
#define mlx5e_dbg(mlevel, priv, format, ...) \
@@@ -217,11 -241,12 +241,12 @@@ struct mlx5e_umr_wqe
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
- struct mlx5_mtt inline_mtts[0];
+ union {
+ struct mlx5_mtt inline_mtts[0];
+ struct mlx5_klm inline_klms[0];
+ };
};
- extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
@@@ -244,6 -269,21 +269,21 @@@
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
+ enum packet_merge {
+ MLX5E_PACKET_MERGE_NONE,
+ MLX5E_PACKET_MERGE_LRO,
+ MLX5E_PACKET_MERGE_SHAMPO,
+ };
+
+ struct mlx5e_packet_merge_param {
+ enum packet_merge type;
+ u32 timeout;
+ struct {
+ u8 match_criteria_type;
+ u8 alignment_granularity;
+ } shampo;
+ };
+
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@@ -253,18 -293,20 +293,20 @@@
u16 mode;
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ struct {
+ struct mlx5e_mqprio_rl *rl;
+ } channel;
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
- bool lro_en;
+ struct mlx5e_packet_merge_param packet_merge;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
- u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@@ -286,7 -328,8 +328,8 @@@ enum
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
- MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
+ MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
struct mlx5e_cq {
@@@ -577,6 -620,7 +620,7 @@@ typedef struct sk_buff
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+ typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
@@@ -598,6 -642,25 +642,25 @@@ struct mlx5e_rq_frags_info
u8 wqe_bulk;
};
+ struct mlx5e_shampo_hd {
- struct mlx5_core_mkey mkey;
++ u32 mkey;
+ struct mlx5e_dma_info *info;
+ struct page *last_page;
+ u16 hd_per_wq;
+ u16 hd_per_wqe;
+ unsigned long *bitmap;
+ u16 pi;
+ u16 ci;
+ __be32 key;
+ u64 last_addr;
+ };
+
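The pi/ci pair forms a producer/consumer ring over hd_per_wq header entries;
all the index math masks with (hd_per_wq - 1), so hd_per_wq is assumed to be
a power of two:

	/* headers posted but not yet consumed, as used later in
	 * mlx5e_free_rx_in_progress_descs():
	 *   in_flight = (shampo->pi - shampo->ci) & (shampo->hd_per_wq - 1);
	 */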
+ struct mlx5e_hw_gro_data {
+ struct sk_buff *skb;
+ struct flow_keys fk;
+ int second_ip_id;
+ };
+
struct mlx5e_rq {
/* data path */
union {
@@@ -619,6 -682,7 +682,7 @@@
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
struct {
@@@ -638,6 -702,8 +702,8 @@@
struct mlx5e_icosq *icosq;
struct mlx5e_priv *priv;
+ struct mlx5e_hw_gro_data *hw_gro_data;
+
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
mlx5e_fp_dealloc_wqe dealloc_wqe;
@@@ -665,7 -731,7 +731,7 @@@
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey umr_mkey;
+ u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@@ -879,11 -945,13 +945,13 @@@ struct mlx5e_priv
#endif
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb;
+ struct mlx5e_mqprio_rl *mqprio_rl;
};
struct mlx5e_rx_handlers {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo;
};
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
@@@ -913,11 -981,13 +981,13 @@@ void mlx5e_build_ptys2ethtool_map(void)
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
+ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
@@@ -1003,7 -1073,8 +1073,8 @@@ int mlx5e_modify_sq(struct mlx5_core_de
struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fce9401ac74,6f398f636f01..9febe4a916df
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@@ -218,6 -218,45 +218,45 @@@ static inline void mlx5e_build_umr_wqe(
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
+ static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
+ {
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+ return 0;
+ }
+
+ static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
+ {
+ kvfree(rq->mpwqe.shampo);
+ }
+
+ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
+ {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
+ node);
+ if (!shampo->bitmap)
+ return -ENOMEM;
+
+ shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
+ sizeof(*shampo->info)),
+ GFP_KERNEL, node);
+ if (!shampo->info) {
+ kvfree(shampo->bitmap);
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+ {
+ kvfree(rq->mpwqe.shampo->bitmap);
+ kvfree(rq->mpwqe.shampo->info);
+ }
+
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@@ -233,9 -272,10 +272,9 @@@
return 0;
}
- static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift, u32 *umr_mkey,
- dma_addr_t filler_addr)
+ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift,
- struct mlx5_core_mkey *umr_mkey,
++ u64 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr)
{
struct mlx5_mtt *mtt;
int inlen;
@@@ -283,12 -323,59 +322,58 @@@
return err;
}
+ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries,
- struct mlx5_core_mkey *umr_mkey)
++ u64 nentries, u32 *umr_mkey)
+ {
+ int inlen;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(mkc, mkc, translations_octword_size, nentries);
+ MLX5_SET(mkc, mkc, length64, 1);
+ err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+ }
+
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
- return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
- rq->wqe_overflow.addr);
+ return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
+ &rq->umr_mkey, rq->wqe_overflow.addr);
+ }
+
+ static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *rq)
+ {
+ u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+
+ if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
+ mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ return -EINVAL;
+ }
+ return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ &rq->mpwqe.shampo->mkey);
}
static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
@@@ -402,6 -489,65 +487,65 @@@ static int mlx5e_init_rxq_rq(struct mlx
return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
+ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq *rq,
+ u32 *pool_size,
+ int node)
+ {
+ void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ int wq_size;
+ int err;
+
+ if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ return 0;
+ err = mlx5e_rq_shampo_hd_alloc(rq, node);
+ if (err)
+ goto out;
+ rq->mpwqe.shampo->hd_per_wq =
+ mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+ if (err)
+ goto err_shampo_hd;
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ if (err)
+ goto err_shampo_info;
+ rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
+ if (!rq->hw_gro_data) {
+ err = -ENOMEM;
+ goto err_hw_gro_data;
+ }
+ rq->mpwqe.shampo->key =
- cpu_to_be32(rq->mpwqe.shampo->mkey.key);
++ cpu_to_be32(rq->mpwqe.shampo->mkey);
+ rq->mpwqe.shampo->hd_per_wqe =
+ mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+ return 0;
+
+ err_hw_gro_data:
+ mlx5e_rq_shampo_hd_info_free(rq);
+ err_shampo_info:
- mlx5_core_destroy_mkey(mdev, &rq->mpwqe.shampo->mkey);
++ mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
+ err_shampo_hd:
+ mlx5e_rq_shampo_hd_free(rq);
+ out:
+ return err;
+ }
+
+ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
+ {
+ if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ return;
+
+ kvfree(rq->hw_gro_data);
+ mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev, &rq->mpwqe.shampo->mkey);
++ mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
+ mlx5e_rq_shampo_hd_free(rq);
+ }
+
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
@@@ -454,11 -600,16 +598,16 @@@
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+ rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
goto err_rq_mkey;
+
+ err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
+ if (err)
+ goto err_free_by_rq_type;
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
@@@ -486,7 -637,7 +635,7 @@@
if (err)
goto err_rq_frags;
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@@ -511,14 -662,14 +660,14 @@@
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
- goto err_free_by_rq_type;
+ goto err_free_shampo;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
- goto err_free_by_rq_type;
+ goto err_free_shampo;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@@ -527,8 -678,10 +676,10 @@@
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
+ 0 : rq->buff.headroom;
- wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+ wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
wqe->data[0].lkey = rq->mkey_be;
} else {
@@@ -568,12 -721,14 +719,14 @@@
return 0;
+ err_free_shampo:
+ mlx5e_rq_free_shampo(rq);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
@@@ -606,8 -761,9 +759,9 @@@ static void mlx5e_free_rq(struct mlx5e_
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
mlx5e_free_mpwqe_rq_drop_page(rq);
+ mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
@@@ -661,6 -817,12 +815,12 @@@ int mlx5e_create_rq(struct mlx5e_rq *rq
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ MLX5_SET(wq, wq, log_headers_buffer_entry_num,
+ order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey.key);
++ MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ }
+
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
@@@ -800,6 -962,15 +960,15 @@@ void mlx5e_free_rx_in_progress_descs(st
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ u16 len;
+
+ len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
+ (rq->mpwqe.shampo->hd_per_wq - 1);
+ mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
+ rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
+ }
+
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
@@@ -825,6 -996,10 +994,10 @@@ void mlx5e_free_rx_descs(struct mlx5e_r
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
+ 0, true);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@@ -844,6 -1019,9 +1017,9 @@@ int mlx5e_open_rq(struct mlx5e_params *
struct mlx5_core_dev *mdev = rq->mdev;
int err;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
+
err = mlx5e_alloc_rq(params, xsk, param, node, rq);
if (err)
return err;
@@@ -929,9 -1107,10 +1105,10 @@@ static int mlx5e_alloc_xdpsq_fifo(struc
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
@@@ -945,10 -1124,11 +1122,11 @@@
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
@@@ -1297,7 -1477,8 +1475,8 @@@ static int mlx5e_set_sq_maxrate(struct
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@@ -1307,10 -1488,7 +1486,7 @@@
if (err)
return err;
- if (qos_queue_group_id)
- sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
- else
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
@@@ -1704,6 -1882,36 +1880,36 @@@ static void mlx5e_close_tx_cqs(struct m
mlx5e_close_cq(&c->sq[tc].cq);
}
+ static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+ {
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+ return tc;
+
+ WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+ return -ENOENT;
+ }
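A worked example with a hypothetical two-TC layout:

	/* tc_to_txq = { {.offset = 0, .count = 4}, {.offset = 4, .count = 4} }
	 * txq 5: tc 0 fails (5 - 0 = 5 >= 4), tc 1 matches (5 - 4 = 1 < 4),
	 * so mlx5e_mqprio_txq_to_tc() returns 1.
	 */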
+
+ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+ u32 *hw_id)
+ {
+ int tc;
+
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+ !params->mqprio.channel.rl) {
+ *hw_id = 0;
+ return 0;
+ }
+
+ tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+ if (tc < 0)
+ return tc;
+
+ return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+ }
+
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@@ -1712,9 -1920,16 +1918,16 @@@
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
+ u32 qos_queue_group_id;
+
+ err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+ if (err)
+ goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+ params, &cparam->txq_sq, &c->sq[tc], tc,
+ qos_queue_group_id,
+ &c->priv->channel_stats[c->ix].sq[tc]);
if (err)
goto err_close_sqs;
}
@@@ -1990,7 -2205,7 +2203,7 @@@ static int mlx5e_open_channel(struct ml
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
@@@ -2184,17 -2399,14 +2397,14 @@@ void mlx5e_close_channels(struct mlx5e_
chs->num = 0;
}
- static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+ static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
{
struct mlx5e_rx_res *res = priv->rx_res;
- struct mlx5e_lro_param lro_param;
-
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
- return mlx5e_rx_res_lro_set_param(res, &lro_param);
+ return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
}
- static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+ static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
@@@ -2339,6 -2551,13 +2549,13 @@@ static int mlx5e_update_netdev_queues(s
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
+ if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+ }
return 0;
@@@ -2900,15 -3119,18 +3117,18 @@@ static void mlx5e_params_mqprio_dcb_set
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
+ params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt *qopt,
+ struct mlx5e_mqprio_rl *rl)
{
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = qopt->num_tc;
+ params->mqprio.channel.rl = rl;
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
}
@@@ -2968,9 -3190,13 +3188,13 @@@ static int mlx5e_mqprio_channel_validat
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
+
if (mqprio->max_rate[i]) {
- netdev_err(netdev, "Max tx rate is not supported\n");
- return -EINVAL;
+ int err;
+
+ err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+ if (err)
+ return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
@@@ -2989,11 -3215,22 +3213,22 @@@
return 0;
}
+ static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+ {
+ int tc;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+ if (mqprio->max_rate[tc])
+ return true;
+ return false;
+ }
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ struct mlx5e_mqprio_rl *rl;
bool nch_changed;
int err;
@@@ -3001,13 -3238,32 +3236,32 @@@
if (err)
return err;
+ rl = NULL;
+ if (mlx5e_mqprio_rate_limit(mqprio)) {
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return -ENOMEM;
+ err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+ mqprio->max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return err;
+ }
+ }
+
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
- return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ if (err && rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+
+ return err;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@@ -3225,7 -3481,7 +3479,7 @@@ static int mlx5e_set_mac(struct net_dev
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
@@@ -3269,16 -3525,59 +3523,59 @@@ static int set_feature_lro(struct net_d
}
new_params = *cur_params;
- new_params.lro_en = enable;
- if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
- mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
- reset = false;
+ if (enable)
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
+ else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ else
+ goto out;
+
+ if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
+ new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
+ if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
+ reset = false;
+ }
}
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_modify_tirs_lro_ctx, NULL, reset);
+ mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+ out:
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
+
+ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params new_params;
+ bool reset = true;
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->channels.params;
+
+ if (enable) {
+ if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n");
+ err = -EINVAL;
+ goto out;
+ }
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
+ new_params.packet_merge.shampo.match_criteria_type =
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
+ new_params.packet_merge.shampo.alignment_granularity =
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
+ } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ } else {
+ goto out;
+ }
+
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
@@@ -3457,6 -3756,7 +3754,7 @@@ int mlx5e_set_features(struct net_devic
mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
@@@ -3517,6 -3817,10 +3815,10 @@@ static netdev_features_t mlx5e_fix_feat
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
features &= ~NETIF_F_LRO;
}
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@@ -3605,7 -3909,7 +3907,7 @@@ int mlx5e_change_mtu(struct net_device
goto out;
}
- if (params->lro_en)
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@@ -4062,8 -4366,8 +4364,8 @@@ static int mlx5e_xdp_allowed(struct mlx
struct net_device *netdev = priv->netdev;
struct mlx5e_params new_params;
- if (priv->channels.params.lro_en) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
return -EINVAL;
}
@@@ -4320,9 -4624,10 +4622,10 @@@ void mlx5e_build_nic_params(struct mlx5
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
/* No XSK params: checking the availability of striding RQ in general. */
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->lro_en = !slow_pci_heuristic(mdev);
+ params->packet_merge.type = slow_pci_heuristic(mdev) ?
+ MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
}
- params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+ params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
@@@ -4350,13 -4655,17 +4653,17 @@@
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
- mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
- if (is_zero_ether_addr(netdev->dev_addr) &&
+ mlx5_query_mac_address(priv->mdev, addr);
+ if (is_zero_ether_addr(addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+ return;
}
+
+ eth_hw_addr_set(netdev, addr);
}
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
@@@ -4453,6 -4762,10 +4760,10 @@@ static void mlx5e_build_nic_netdev(stru
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (!!MLX5_CAP_GEN(mdev, shampo) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@@ -4503,6 -4816,7 +4814,7 @@@
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
netdev->features &= ~NETIF_F_LRO;
+ netdev->features &= ~NETIF_F_GRO_HW;
netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
@@@ -4607,7 -4921,6 +4919,6 @@@ static int mlx5e_init_nic_rx(struct mlx
{
struct mlx5_core_dev *mdev = priv->mdev;
enum mlx5e_rx_res_features features;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@@ -4625,9 -4938,9 +4936,9 @@@
features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@@ -4861,6 -5174,11 +5172,11 @@@ void mlx5e_priv_cleanup(struct mlx5e_pr
kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 29a6586ef28d,fe979edd96dc..f63c8ff3ef3f
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@@ -33,9 -33,12 +33,12 @@@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+ #include <linux/bitmap.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
+ #include <net/udp.h>
+ #include <net/tcp.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
@@@ -62,10 -65,12 +65,12 @@@ mlx5e_skb_from_cqe_mpwrq_nonlinear(stru
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe = mlx5e_handle_rx_cqe,
.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
@@@ -185,8 -190,9 +190,9 @@@ static inline u32 mlx5e_decompress_cqes
mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, &cqd->title);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
+ rq, &cqd->title);
}
mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
wq->cc = cqcc;
@@@ -206,8 -212,9 +212,9 @@@ static inline u32 mlx5e_decompress_cqes
mlx5e_read_title_slot(rq, wq, cc);
mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
mlx5e_decompress_cqe(rq, wq, cc);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, &cqd->title);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
+ rq, &cqd->title);
cqd->mini_arr_idx++;
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
@@@ -448,13 -455,13 +455,13 @@@ mlx5e_add_skb_frag(struct mlx5e_rq *rq
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
- int offset_from, u32 headlen)
+ int offset_from, int dma_offset, u32 headlen)
{
const void *from = page_address(dma_info->page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
+ dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, from, len);
}
@@@ -494,6 -501,157 +501,157 @@@ static void mlx5e_post_rx_mpwqe(struct
mlx5_wq_ll_update_db_record(wq);
}
+ /* Return the size of the contiguous free space inside a bitmap, starting
+ * from first and no longer than len, wrapping around the end of the bitmap
+ * if needed.
+ */
+ static int bitmap_find_window(unsigned long *bitmap, int len,
+ int bitmap_size, int first)
+ {
+ int next_one, count;
+
+ next_one = find_next_bit(bitmap, bitmap_size, first);
+ if (next_one == bitmap_size) {
+ if (bitmap_size - first >= len)
+ return len;
+ next_one = find_next_bit(bitmap, bitmap_size, 0);
+ count = next_one + bitmap_size - first;
+ } else {
+ count = next_one - first;
+ }
+
+ return min(len, count);
+ }
+
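A concrete trace of the wrap-around case, with illustrative values:

	/* bitmap_size = 8, only bit 2 set, first = 6, len = 5:
	 * find_next_bit(bitmap, 8, 6) == 8 (no bit set at or after 6), and
	 * 8 - 6 = 2 < len, so wrap: next_one = find_next_bit(bitmap, 8, 0) = 2,
	 * count = 2 + 8 - 6 = 4 (slots 6, 7, 0, 1 free) -> min(5, 4) = 4.
	 */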
+ static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+ {
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ umr_wqe->ctrl.umr_mkey = key;
+ umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ | MLX5E_KLM_UMR_DS_CNT(klm_len));
+ umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
+ umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ }
+
+ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
+ struct mlx5e_icosq *sq,
+ u16 klm_entries, u16 index)
+ {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
- u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey.key;
++ u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
+ struct page *page = shampo->last_page;
+ u64 addr = shampo->last_addr;
+ struct mlx5e_dma_info *dma_info;
+ struct mlx5e_umr_wqe *umr_wqe;
+ int headroom;
+
+ headroom = rq->buff.headroom;
+ new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
+ entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+ wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
+ umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
+
+ for (i = 0; i < entries; i++, index++) {
+ dma_info = &shampo->info[index];
+ if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
+ MLX5_UMR_KLM_ALIGNMENT))
+ goto update_klm;
+ header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
+ MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ if (!(header_offset & (PAGE_SIZE - 1))) {
+ err = mlx5e_page_alloc(rq, dma_info);
+ if (unlikely(err))
+ goto err_unmap;
+ addr = dma_info->addr;
+ page = dma_info->page;
+ } else {
+ dma_info->addr = addr + header_offset;
+ dma_info->page = page;
+ }
+
+ update_klm:
+ umr_wqe->inline_klms[i].bcount =
+ cpu_to_be32(MLX5E_RX_MAX_HEAD);
+ umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
+ umr_wqe->inline_klms[i].va =
+ cpu_to_be64(dma_info->addr + headroom);
+ }
+
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
+ .num_wqebbs = wqe_bbs,
+ .shampo.len = new_entries,
+ };
+
+ shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
+ shampo->last_page = page;
+ shampo->last_addr = addr;
+ sq->pc += wqe_bbs;
+ sq->doorbell_cseg = &umr_wqe->ctrl;
+
+ return 0;
+
+ err_unmap:
+ while (--i >= 0) {
+ if (--index < 0)
+ index = shampo->hd_per_wq - 1;
+ dma_info = &shampo->info[index];
+ if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
+ dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
+ mlx5e_page_release(rq, dma_info, true);
+ }
+ }
+ rq->stats->buff_alloc_err++;
+ return err;
+ }
+
+ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
+ {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 klm_entries, num_wqe, index, entries_before;
+ struct mlx5e_icosq *sq = rq->icosq;
+ int i, err, max_klm_entries, len;
+
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+ klm_entries = bitmap_find_window(shampo->bitmap,
+ shampo->hd_per_wqe,
+ shampo->hd_per_wq, shampo->pi);
+ if (!klm_entries)
+ return 0;
+
+ klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+ entries_before = shampo->hd_per_wq - index;
+
+ if (unlikely(entries_before < klm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
+ DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ else
+ num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+
+ for (i = 0; i < num_wqe; i++) {
+ len = (klm_entries > max_klm_entries) ? max_klm_entries :
+ klm_entries;
+ if (unlikely(index + len > shampo->hd_per_wq))
+ len = shampo->hd_per_wq - index;
+ err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
+ if (unlikely(err))
+ return err;
+ index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
+ klm_entries -= len;
+ }
+
+ return 0;
+ }
+
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@@ -514,6 -672,12 +672,12 @@@
goto err;
}
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ err = mlx5e_alloc_rx_hd_mpwqe(rq);
+ if (unlikely(err))
+ goto err;
+ }
+
pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@@ -558,6 -722,44 +722,44 @@@ err
return err;
}
+ /* This function deallocates the SHAMPO header buffer.
+ * close == true means we are in the middle of closing the RQ, so we walk
+ * over all the entries and free those still marked as in use in the bitmap;
+ * otherwise we free only the specific range inside the header buffer that
+ * is no longer in use.
+ */
+ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+ {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int hd_per_wq = shampo->hd_per_wq;
+ struct page *deleted_page = NULL;
+ struct mlx5e_dma_info *hd_info;
+ int i, index = start;
+
+ for (i = 0; i < len; i++, index++) {
+ if (index == hd_per_wq)
+ index = 0;
+
+ if (close && !test_bit(index, shampo->bitmap))
+ continue;
+
+ hd_info = &shampo->info[index];
+ hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
+ if (hd_info->page != deleted_page) {
+ deleted_page = hd_info->page;
+ mlx5e_page_release(rq, hd_info, false);
+ }
+ }
+
+ if (start + len > hd_per_wq) {
+ len -= hd_per_wq - start;
+ bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
+ start = 0;
+ }
+
+ bitmap_clear(shampo->bitmap, start, len);
+ }
+
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@@ -629,6 -831,28 +831,28 @@@ void mlx5e_free_icosq_descs(struct mlx5
sq->cc = sqcc;
}
+ static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+ struct mlx5e_icosq *sq)
+ {
+ struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+ struct mlx5e_shampo_hd *shampo;
+ /* assume 1:1 relationship between RQ and icosq */
+ struct mlx5e_rq *rq = &c->rq;
+ int end, from, len = umr.len;
+
+ shampo = rq->mpwqe.shampo;
+ end = shampo->hd_per_wq;
+ from = shampo->ci;
+ if (from + len > shampo->hd_per_wq) {
+ len -= end - from;
+ bitmap_set(shampo->bitmap, from, end - from);
+ from = 0;
+ }
+
+ bitmap_set(shampo->bitmap, from, len);
+ shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+ }
+
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@@ -685,6 -909,9 +909,9 @@@
break;
case MLX5E_ICOSQ_WQE_NOP:
break;
+ case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
+ mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
+ break;
#ifdef CONFIG_MLX5_EN_TLS
case MLX5E_ICOSQ_WQE_UMR_TLS:
break;
@@@ -782,8 -1009,8 +1009,8 @@@ static void mlx5e_lro_update_tcp_hdr(st
if (tcp_ack) {
tcp->ack = 1;
- tcp->ack_seq = cqe->lro_ack_seq_num;
- tcp->window = cqe->lro_tcp_win;
+ tcp->ack_seq = cqe->lro.ack_seq_num;
+ tcp->window = cqe->lro.tcp_win;
}
}
@@@ -809,7 -1036,7 +1036,7 @@@ static void mlx5e_lro_update_hdr(struc
tcp = ip_p + sizeof(struct iphdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->ttl = cqe->lro.min_ttl;
ipv4->tot_len = cpu_to_be16(tot_len);
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
@@@ -829,7 -1056,7 +1056,7 @@@
tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->hop_limit = cqe->lro.min_ttl;
ipv6->payload_len = cpu_to_be16(payload_len);
mlx5e_lro_update_tcp_hdr(cqe, tcp);
@@@ -841,6 -1068,142 +1068,142 @@@
}
}
+ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
+ {
+ struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
+ u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+
+ return page_address(last_head->page) + head_offset;
+ }
+
+ static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
+ {
+ int udp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + udp_off);
+ uh->len = htons(skb->len - udp_off);
+
+ if (uh->check)
+ uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
+ ipv4->daddr, 0);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+ }
+
+ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
+ {
+ int udp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + udp_off);
+ uh->len = htons(skb->len - udp_off);
+
+ if (uh->check)
+ uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
+ &ipv6->daddr, 0);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+ }
+
+ static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct tcphdr *skb_tcp_hd)
+ {
+ u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ struct tcphdr *last_tcp_hd;
+ void *last_hd_addr;
+
+ last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
+ last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+ }
+
+ static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
+ struct mlx5_cqe64 *cqe, bool match)
+ {
+ int tcp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct tcphdr *tcp;
+
+ tcp = (struct tcphdr *)(skb->data + tcp_off);
+ if (match)
+ mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
+
+ tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
+ ipv4->daddr, 0);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
+ skb->csum_start = (unsigned char *)tcp - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (tcp->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ }
+
+ static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
+ struct mlx5_cqe64 *cqe, bool match)
+ {
+ int tcp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct tcphdr *tcp;
+
+ tcp = (struct tcphdr *)(skb->data + tcp_off);
+ if (match)
+ mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
+
+ tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
+ &ipv6->daddr, 0);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ skb->csum_start = (unsigned char *)tcp - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (tcp->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ }
+
+ static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
+ {
+ bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ if (is_ipv4) {
+ int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
+ __be16 newlen = htons(skb->len - nhoff);
+
+ csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
+ ipv4->tot_len = newlen;
+
+ if (ipv4->protocol == IPPROTO_TCP)
+ mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
+ else
+ mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
+ } else {
+ int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
+
+ ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
+
+ if (ipv6->nexthdr == IPPROTO_TCP)
+ mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
+ else
+ mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
+ }
+ }
+
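[mlx5e_shampo_update_hdr() patches the aggregated skb's IPv4 header in place: tot_len is rewritten and the header checksum is fixed up incrementally with csum_replace2() rather than recomputed over the whole header. The incremental rule is RFC 1624's HC' = ~(~HC + ~m + m'). A self-contained demonstration that the incremental update agrees with a full recompute; the header words are a made-up sample and byte order is ignored, this is checksum arithmetic only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

static uint16_t checksum(const uint16_t *p, int n16) /* csum field = 0 */
{
        uint32_t sum = 0;

        for (int i = 0; i < n16; i++)
                sum += p[i];
        return (uint16_t)~csum_fold(sum);
}

/* The csum_replace2() idea: update one 16-bit field incrementally. */
static uint16_t replace2(uint16_t check, uint16_t from, uint16_t to)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~from;
        sum += to;
        return (uint16_t)~csum_fold(sum);
}

int main(void)
{
        uint16_t hdr[10] = { 0x4500, 0x0054, 0x1c46, 0x4000, 0x4006,
                             0x0000, 0xac10, 0x0a63, 0xac10, 0x0a0c };
        uint16_t new_len = 0x05dc;      /* grown tot_len */
        uint32_t sum = 0;

        hdr[5] = checksum(hdr, 10);     /* seed a valid checksum */
        hdr[5] = replace2(hdr[5], hdr[1], new_len);
        hdr[1] = new_len;

        for (int i = 0; i < 10; i++)    /* a valid header folds to 0xffff */
                sum += hdr[i];
        assert(csum_fold(sum) == 0xffff);
        puts("incremental update matches full recompute");
        return 0;
}
]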
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
@@@ -1090,6 -1453,27 +1453,27 @@@ static inline void mlx5e_build_rx_skb(s
stats->mcast_packets++;
}
+ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+ {
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->packets++;
+ stats->gro_packets++;
+ stats->bytes += cqe_bcnt;
+ stats->gro_bytes += cqe_bcnt;
+ if (NAPI_GRO_CB(skb)->count != 1)
+ return;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ skb_reset_network_header(skb);
+ if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
+ napi_gro_receive(rq->cq.napi, skb);
+ rq->hw_gro_data->skb = NULL;
+ }
+ }
+
static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
@@@ -1199,7 -1583,8 +1583,8 @@@ mlx5e_skb_from_cqe_nonlinear(struct mlx
}
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
+ headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@@ -1395,6 -1780,30 +1780,30 @@@ const struct mlx5e_rx_handlers mlx5e_rx
};
#endif
+ static void
+ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ u32 data_bcnt, u32 data_offset)
+ {
+ net_prefetchw(skb->data);
+
+ while (data_bcnt) {
+ u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
+ unsigned int truesize;
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ truesize = pg_consumed_bytes;
+ else
+ truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+
+ mlx5e_add_skb_frag(rq, skb, di, data_offset,
+ pg_consumed_bytes, truesize);
+
+ data_bcnt -= pg_consumed_bytes;
+ data_offset = 0;
+ di++;
+ }
+ }
+
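[mlx5e_fill_skb_data() charges truesize differently per mode: with SHAMPO the skb is charged exactly the bytes consumed, while the classic MPWQE path rounds each fragment up to the stride size, since a whole stride is occupied either way. A small sketch of the rounding; the stride size is an assumed example value.

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
        uint32_t log_stride_sz = 11;            /* assumed 2 KiB strides */
        uint32_t pg_consumed_bytes = 1500;

        printf("shampo truesize: %u\n", pg_consumed_bytes);
        printf("mpwqe truesize:  %u\n",
               ALIGN(pg_consumed_bytes, 1u << log_stride_sz)); /* 2048 */
        return 0;
}
]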
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
@@@ -1420,20 -1829,9 +1829,9 @@@
frag_offset -= PAGE_SIZE;
}
- while (byte_cnt) {
- u32 pg_consumed_bytes =
- min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- unsigned int truesize =
- ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
-
- mlx5e_add_skb_frag(rq, skb, di, frag_offset,
- pg_consumed_bytes, truesize);
- byte_cnt -= pg_consumed_bytes;
- frag_offset = 0;
- di++;
- }
+ mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@@ -1487,6 -1885,181 +1885,181 @@@ mlx5e_skb_from_cqe_mpwrq_linear(struct
return skb;
}
+ static void
+ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe, u16 header_index)
+ {
+ struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
+ u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ u16 head_size = cqe->shampo.header_size;
+ u16 rx_headroom = rq->buff.headroom;
+ struct sk_buff *skb = NULL;
+ void *hdr, *data;
+ u32 frag_size;
+
+ hdr = page_address(head->page) + head_offset;
+ data = hdr + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
+
+ if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ /* build SKB around header */
+ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+ prefetchw(hdr);
+ prefetch(data);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+
+ if (unlikely(!skb))
+ return;
+
+ /* queue up for recycling/reuse */
+ page_ref_inc(head->page);
+
+ } else {
+ /* allocate SKB and copy header for large header */
+ rq->stats->gro_large_hds++;
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(head_size, sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return;
+ }
+
+ prefetchw(skb->data);
+ mlx5e_copy_skb_header(rq->pdev, skb, head,
+ head_offset + rx_headroom,
+ rx_headroom, head_size);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += head_size;
+ skb->len += head_size;
+ }
+ rq->hw_gro_data->skb = skb;
+ NAPI_GRO_CB(skb)->count = 1;
+ skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+ }
+
+ static void
+ mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
+ {
+ skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ unsigned int frag_size = skb_frag_size(last_frag);
+ unsigned int frag_truesize;
+
+ frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
+ skb->truesize += frag_truesize - frag_size;
+ }
+
+ static void
+ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
+ {
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->gro_skbs++;
+ if (likely(skb_shinfo(skb)->nr_frags))
+ mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
+ if (NAPI_GRO_CB(skb)->count > 1)
+ mlx5e_shampo_update_hdr(rq, cqe, match);
+ napi_gro_receive(rq->cq.napi, skb);
+ rq->hw_gro_data->skb = NULL;
+ }
+
+ static bool
+ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
+ {
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+ }
+
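[The space check above is deliberately conservative: every fragment already attached is assumed to occupy a full page, so with 4 KiB pages and the usual 64 KiB GSO_MAX_SIZE an aggregated skb tops out at 16 page fragments. A quick illustration; both constants are the common defaults, assumed here.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE    4096u
#define GSO_MAX_SIZE 65536u     /* common default, assumed */

static bool has_enough_space(unsigned int nr_frags, unsigned int data_bcnt)
{
        return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
}

int main(void)
{
        printf("15 frags + 1500B: %d\n", has_enough_space(15, 1500)); /* 1 */
        printf("16 frags + 1500B: %d\n", has_enough_space(16, 1500)); /* 0 */
        return 0;
}
]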
+ static void
+ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
+ {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u64 addr = shampo->info[header_index].addr;
+
+ if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
+ mlx5e_page_release(rq, &shampo->info[header_index], true);
+ }
+ bitmap_clear(shampo->bitmap, header_index, 1);
+ }
+
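[mlx5e_free_rx_shampo_hd_entry() releases the backing page only when the freed entry is the last header slot on its page; because the headers-per-page count is a power of two, that is a simple mask test. A standalone sketch; the per-page count below is a hypothetical power of two, not a value taken from this patch.

#include <stdio.h>

#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE 64u     /* hypothetical */

int main(void)
{
        for (unsigned int i = 60; i < 68; i++)
                if (((i + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0)
                        printf("entry %u is last on its page -> release\n", i);
        return 0;       /* prints only entry 63 in this range */
}
]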
+ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+ {
+ u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
+ u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct sk_buff **skb = &rq->hw_gro_data->skb;
+ bool flush = cqe->shampo.flush;
+ bool match = cqe->shampo.match;
+ struct mlx5e_rq_stats *stats = rq->stats;
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5e_dma_info *di;
+ struct mlx5e_mpw_info *wi;
+ struct mlx5_wq_ll *wq;
+
+ wi = &rq->mpwqe.info[wqe_id];
+ wi->consumed_strides += cstrides;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ stats->wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
+ goto mpwrq_cqe_out;
+ }
+
+ stats->gro_match_packets += match;
+
+ if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
+ match = false;
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+ }
+
+ if (!*skb) {
+ mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (unlikely(!*skb))
+ goto free_hd_entry;
+ } else {
+ NAPI_GRO_CB(*skb)->count++;
+ if (NAPI_GRO_CB(*skb)->count == 2 &&
+ rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
+ void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
+ int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
+ sizeof(struct iphdr);
+ struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
+
+ rq->hw_gro_data->second_ip_id = ntohs(iph->id);
+ }
+ }
+
+ di = &wi->umr.dma_info[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+
+ mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (flush)
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+ free_hd_entry:
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+ }
+
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
@@@ -1579,11 -2152,15 +2152,15 @@@ int mlx5e_poll_rx_cq(struct mlx5e_cq *c
mlx5_cqwq_pop(cqwq);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, cqe);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
+ mlx5e_shampo_flush_skb(rq, NULL, false);
+
if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
@@@ -1784,15 -2361,24 +2361,24 @@@ int mlx5e_rq_set_handlers(struct mlx5e_
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
if (mlx5_fpga_is_ipsec_device(mdev)) {
netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
return -EINVAL;
}
- if (!rq->handle_rx_cqe) {
- netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
- return -EINVAL;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
+ } else {
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
}
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
diff --combined drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 71a08f84d49d,873efde0d458..386ab9a2d490
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@@ -99,9 -99,6 +99,9 @@@
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
+#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
+
#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
@@@ -209,63 -206,34 +209,63 @@@ static struct init_tree_node egress_roo
}
};
-#define RDMA_RX_BYPASS_PRIO 0
-#define RDMA_RX_KERNEL_PRIO 1
+enum {
+ RDMA_RX_COUNTERS_PRIO,
+ RDMA_RX_BYPASS_PRIO,
+ RDMA_RX_KERNEL_PRIO,
+};
+
+#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
+#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
+ [RDMA_RX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
+ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
[RDMA_RX_BYPASS_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
[RDMA_RX_KERNEL_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
ADD_MULTIPLE_PRIO(1, 1))),
}
};
+enum {
+ RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_BYPASS_PRIO,
+};
+
+#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
+#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 1,
+ .ar_size = 2,
.children = (struct init_tree_node[]) {
- ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ [RDMA_TX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
+ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_BYPASS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
- ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
BY_PASS_PRIO_NUM_LEVELS))),
}
};
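[The new MIN_LEVEL defines keep the RDMA trees' levels derived from one another instead of open-coded sums. With MLX5_BY_PASS_NUM_REGULAR_PRIOS equal to 16 (visible in the mlx5/device.h hunk later in this diff), RX bypass starts at level 16, the kernel namespace at 17, and counters at 19. The arithmetic, checked standalone:

#include <stdio.h>

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16

#define RDMA_RX_BYPASS_MIN_LEVEL   MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL   (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)

int main(void)
{
        printf("bypass=%d kernel=%d counters=%d\n",
               RDMA_RX_BYPASS_MIN_LEVEL, RDMA_RX_KERNEL_MIN_LEVEL,
               RDMA_RX_COUNTERS_MIN_LEVEL);     /* 16 17 19 */
        return 0;
}
]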
@@@ -2223,6 -2191,10 +2223,10 @@@ struct mlx5_flow_namespace *mlx5_get_fl
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@@ -2247,12 -2219,6 +2251,12 @@@
prio = RDMA_RX_KERNEL_PRIO;
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
root_ns = steering->rdma_tx_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_COUNTERS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_COUNTERS_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@@ -2634,6 -2600,7 +2638,7 @@@ void mlx5_cleanup_fs(struct mlx5_core_d
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@@@ -2672,6 -2639,21 +2677,21 @@@ static int init_sniffer_rx_root_ns(stru
return PTR_ERR_OR_ZERO(prio);
}
+ #define PORT_SEL_NUM_LEVELS 3
+ static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+ {
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+ }
+
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@@ -3058,6 -3040,12 +3078,12 @@@ int mlx5_init_fs(struct mlx5_core_dev *
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@@@ -3262,6 -3250,52 +3288,52 @@@ void mlx5_packet_reformat_dealloc(struc
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+ int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
+ {
+ return definer->id;
+ }
+
+ struct mlx5_flow_definer *
+ mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask)
+ {
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_definer *definer;
+ int id;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return ERR_PTR(-ENOMEM);
+
+ definer->ns_type = ns_type;
+ id = root->cmds->create_match_definer(root, format_id, match_mask);
+ if (id < 0) {
+ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
+ kfree(definer);
+ return ERR_PTR(id);
+ }
+ definer->id = id;
+ return definer;
+ }
+
+ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer)
+ {
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, definer->ns_type);
+ if (WARN_ON(!root))
+ return;
+
+ root->cmds->destroy_match_definer(root, definer->id);
+ kfree(definer);
+ }
+
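[The definer API is create/query-id/destroy: mlx5_create_match_definer() returns the object or an ERR_PTR, and mlx5_get_match_definer_id() exposes the id that steering commands reference. A hedged kernel-context sketch of the expected call pattern; the namespace type and format id below are illustrative placeholders, not values taken from this patch.

/* Sketch only: error paths trimmed, format id is hypothetical. */
static int example_use_definer(struct mlx5_core_dev *dev, u32 *match_mask)
{
        struct mlx5_flow_definer *definer;
        int definer_id;

        definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
                                            0 /* hypothetical format id */,
                                            match_mask);
        if (IS_ERR(definer))
                return PTR_ERR(definer);

        definer_id = mlx5_get_match_definer_id(definer);
        /* ... program steering objects that reference definer_id ... */

        mlx5_destroy_match_definer(dev, definer);
        return definer_id;
}
]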
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
diff --combined drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index baebc1aac5d0,84297cc1b509..ea1efdecc88c
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@@ -67,7 -67,7 +67,7 @@@ static void mlx5i_build_nic_params(stru
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->lro_en = false;
+ params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
params->tunneled_offload_en = false;
}
@@@ -219,7 -219,7 +219,7 @@@ void mlx5i_uninit_underlay_qp(struct ml
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- unsigned char *dev_addr = priv->netdev->dev_addr;
+ const unsigned char *dev_addr = priv->netdev->dev_addr;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
struct mlx5i_priv *ipriv = priv->ppriv;
@@@ -336,6 -336,8 +336,8 @@@ static int mlx5i_create_flow_steering(s
goto err_destroy_arfs_tables;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_arfs_tables:
@@@ -348,12 -350,12 +350,12 @@@ static void mlx5i_destroy_flow_steering
{
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@@ -368,9 -370,9 +370,9 @@@
goto err_destroy_q_counters;
}
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@@ -472,13 -474,11 +474,13 @@@ int mlx5i_dev_init(struct net_device *d
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = priv->ppriv;
+ u8 addr_mod[3];
/* Set dev address using underlay QP */
- dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
- dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
- dev->dev_addr[3] = (ipriv->qpn) & 0xff;
+ addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
+ addr_mod[1] = (ipriv->qpn >> 8) & 0xff;
+ addr_mod[2] = (ipriv->qpn) & 0xff;
+ dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));
/* Add QPN to net-device mapping to HT */
mlx5i_pkey_add_qpn(dev, ipriv->qpn);
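[Staging the bytes through addr_mod[] avoids writing dev->dev_addr directly (it is becoming const); dev_addr_mod() splices the three QPN-derived bytes in at offset 1 of the device address. The byte extraction itself is plain shifting, shown standalone; the QPN value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t qpn = 0x0a1b2c;        /* hypothetical underlay QPN */
        uint8_t addr_mod[3] = {
                (qpn >> 16) & 0xff,
                (qpn >> 8) & 0xff,
                qpn & 0xff,
        };

        /* these land in dev_addr bytes 1..3 via dev_addr_mod(dev, 1, ...) */
        printf("%02x:%02x:%02x\n", addr_mod[0], addr_mod[1], addr_mod[2]);
        return 0;
}
]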
diff --combined drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 6c67185c05c1,73fed94af09a..3f47d2b3b6e6
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@@ -4,7 -4,7 +4,7 @@@
#ifndef _DR_TYPES_
#define _DR_TYPES_
- #include <linux/mlx5/driver.h>
+ #include <linux/mlx5/vport.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
@@@ -14,7 -14,6 +14,6 @@@
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
- #define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@@ -752,9 -751,9 +751,9 @@@ struct mlx5dr_esw_caps
struct mlx5dr_cmd_vport_cap {
u16 vport_gvmi;
u16 vhca_gvmi;
+ u16 num;
u64 icm_address_rx;
u64 icm_address_tx;
- u32 num;
};
struct mlx5dr_roce_cap {
@@@ -763,6 -762,11 +762,11 @@@
u8 fl_rc_qp_when_roce_enabled:1;
};
+ struct mlx5dr_vports {
+ struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct xarray vports_caps_xa;
+ };
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@@ -786,7 -790,6 +790,6 @@@
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
- u8 num_esw_ports;
u8 sw_format_ver;
bool eswitch_manager;
bool rx_sw_owner;
@@@ -795,11 -798,11 +798,11 @@@
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
- u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
- struct mlx5dr_cmd_vport_cap *vports_caps;
+ struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@@ -826,10 -829,6 +829,6 @@@ struct mlx5dr_domain_info
struct mlx5dr_cmd_caps caps;
};
- struct mlx5dr_domain_cache {
- struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
- };
-
struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn;
struct mlx5_core_dev *mdev;
@@@ -841,7 -840,7 +840,7 @@@
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
- struct mlx5dr_domain_cache cache;
+ struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
};
@@@ -942,7 -941,7 +941,7 @@@ struct mlx5dr_action_dest_tbl
struct mlx5dr_action_ctr {
u32 ctr_id;
- u32 offeset;
+ u32 offset;
};
struct mlx5dr_action_vport {
@@@ -1102,18 -1101,8 +1101,8 @@@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_
return true;
}
- static inline struct mlx5dr_cmd_vport_cap *
- mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
- {
- if (!caps->vports_caps ||
- (vport >= caps->num_vports && vport != WIRE_PORT))
- return NULL;
-
- if (vport == WIRE_PORT)
- vport = caps->num_vports;
-
- return &caps->vports_caps[vport];
- }
+ struct mlx5dr_cmd_vport_cap *
+ mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
struct mlx5dr_cmd_query_flow_table_details {
u8 status;
@@@ -1154,7 -1143,7 +1143,7 @@@ int mlx5dr_cmd_set_fte_modify_and_vport
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id);
+ u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id);
@@@ -1275,7 -1264,7 +1264,7 @@@ struct mlx5dr_cq
struct mlx5dr_mr {
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
dma_addr_t dma_addr;
void *addr;
size_t size;
@@@ -1372,12 -1361,12 +1361,12 @@@ struct mlx5dr_fw_recalc_cs_ft
};
struct mlx5dr_fw_recalc_cs_ft *
- mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
- int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr);
+ int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
diff --combined drivers/net/ethernet/microchip/lan743x_main.c
index c4f32c7e0db1,03d02403c19e..9d1091237567
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@@ -816,7 -816,7 +816,7 @@@ static int lan743x_mac_init(struct lan7
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@@ -1743,16 -1743,6 +1743,16 @@@ static int lan743x_tx_ring_init(struct
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
@@@ -2286,16 -2276,6 +2286,16 @@@ static int lan743x_rx_ring_init(struct
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@@@ -2665,7 -2645,7 +2665,7 @@@ static int lan743x_netdev_set_mac_addre
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
@@@ -3039,8 -3019,6 +3039,8 @@@ static int lan743x_pm_resume(struct dev
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
}
/* open netdev when netdev is at running state while resume.
diff --combined drivers/net/ethernet/nxp/lpc_eth.c
index c910fa2f40a4,a63cc295b979..bc39558fe82b
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@@ -419,7 -419,7 +419,7 @@@ struct netdata_local
/*
* MAC support functions
*/
- static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+ static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@@ -1015,6 -1015,9 +1015,6 @@@ static int lpc_eth_close(struct net_dev
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
-
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@@@ -1022,8 -1025,6 +1022,8 @@@
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;
@@@ -1092,7 -1093,7 +1092,7 @@@ static int lpc_set_mac_address(struct n
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@@ -1231,6 -1232,7 +1231,7 @@@ static int lpc_eth_drv_probe(struct pla
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@@ -1346,10 -1348,11 +1347,11 @@@
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --combined drivers/net/ethernet/smsc/smc91x.c
index e4fc6484faa8,a31c159e96ea..96a9400b5737
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@@ -1851,6 -1851,7 +1851,7 @@@ static int smc_probe(struct net_device
int retval;
unsigned int val, revision_register;
const char *version_string;
+ u8 addr[ETH_ALEN];
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@@ -1922,7 -1923,8 +1923,8 @@@
/* Get the MAC address */
SMC_SELECT_BANK(lp, 1);
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc_reset(dev);
@@@ -2190,7 -2192,6 +2192,7 @@@ static const struct of_device_id smc91x
};
MODULE_DEVICE_TABLE(of, smc91x_match);
+#if defined(CONFIG_GPIOLIB)
/**
* try_toggle_control_gpio - configure a gpio if it exists
* @dev: net device
@@@ -2221,15 -2222,6 +2223,15 @@@ static int try_toggle_control_gpio(stru
return 0;
}
+#else
+static int try_toggle_control_gpio(struct device *dev,
+ struct gpio_desc **desc,
+ const char *name, int index,
+ int value, unsigned int nsdelay)
+{
+ return 0;
+}
+#endif
#endif
/*
diff --combined drivers/net/usb/lan78xx.c
index 63cd72c5f580,03319fdb5235..f20376c1ef3f
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@@ -1817,7 -1817,7 +1817,7 @@@ static void lan78xx_init_mac_address(st
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr);
+ eth_hw_addr_set(dev->net, addr);
}
/* MDIO read and write wrappers for phylib */
@@@ -2416,7 -2416,7 +2416,7 @@@ static int lan78xx_set_mac_addr(struct
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] |
netdev->dev_addr[1] << 8 |
@@@ -4122,12 -4122,6 +4122,12 @@@ static int lan78xx_probe(struct usb_int
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */
+ if (dev->maxpacket == 0) {
+ ret = -ENODEV;
+ goto out4;
+ }
+
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
diff --combined drivers/net/usb/usbnet.c
index a33d7fb82a00,350bae673ed4..9a6450f796dc
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@@ -165,12 -165,13 +165,13 @@@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints)
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
+ u8 addr[ETH_ALEN];
int tmp = -1, ret;
unsigned char buf [13];
ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
if (ret == 12)
- tmp = hex2bin(dev->net->dev_addr, buf, 6);
+ tmp = hex2bin(addr, buf, 6);
if (tmp < 0) {
dev_dbg(&dev->udev->dev,
"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
@@@ -178,6 -179,7 +179,7 @@@
ret = -EINVAL;
return ret;
}
+ eth_hw_addr_set(dev->net, addr);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@@ -1726,7 -1728,7 +1728,7 @@@ usbnet_probe (struct usb_interface *ude
dev->net = net;
strscpy(net->name, "usb%d", sizeof(net->name));
- memcpy (net->dev_addr, node_id, sizeof node_id);
+ eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes;
* bind() should set rx_urb_size in that case.
@@@ -1790,7 -1792,6 +1792,7 @@@
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
if (dev->maxpacket == 0) {
/* that is a broken device */
+ status = -ENODEV;
goto out4;
}
diff --combined drivers/net/xen-netfront.c
index fc41ba95f81d,57437e4b8a94..911f43986a8c
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@@ -1730,10 -1730,6 +1730,10 @@@ static int netfront_resume(struct xenbu
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev);
+ netif_device_detach(info->netdev);
+ netif_tx_unlock_bh(info->netdev);
+
xennet_disconnect_backend(info);
return 0;
}
@@@ -2161,6 -2157,7 +2161,7 @@@ static int talk_to_netback(struct xenbu
unsigned int max_queues = 0;
struct netfront_queue *queue = NULL;
unsigned int num_queues = 1;
+ u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@@ -2174,11 -2171,12 +2175,12 @@@
"feature-split-event-channels", 0);
/* Read mac addr. */
- err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ err = xen_net_read_mac(dev, addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
goto out_unlocked;
}
+ eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
"feature-xdp-headroom", 0);
@@@ -2353,10 -2351,6 +2355,10 @@@ static int xennet_connect(struct net_de
* domain a kick because we've probably just requeued some
* packets.
*/
+ netif_tx_lock_bh(np->netdev);
+ netif_device_attach(np->netdev);
+ netif_tx_unlock_bh(np->netdev);
+
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
diff --combined drivers/soc/fsl/dpio/dpio-service.c
index 4d119e98ce7c,3fd0d0840287..1d2b27e3ea63
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@@ -12,6 -12,7 +12,7 @@@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+ #include <linux/dim.h>
#include <linux/slab.h>
#include "dpio.h"
@@@ -28,6 -29,14 +29,14 @@@ struct dpaa2_io
spinlock_t lock_notifications;
struct list_head notifications;
struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
};
struct dpaa2_io_store {
@@@ -59,7 -68,7 +68,7 @@@ static inline struct dpaa2_io *service_
* potentially being migrated away.
*/
if (cpu < 0)
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
return dpio_by_cpu[cpu];
@@@ -100,6 -109,17 +109,17 @@@ struct dpaa2_io *dpaa2_io_service_selec
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+ static void dpaa2_io_dim_work(struct work_struct *w)
+ {
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+ }
+
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
@@@ -114,6 -134,7 +134,7 @@@ struct dpaa2_io *dpaa2_io_create(const
struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
if (!obj)
return NULL;
@@@ -127,7 -148,15 +148,15 @@@
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+ /* Compute how many ns one increment of 256 QBMAN cycles takes. This is
+ * because the interrupt timeout period register needs to be specified
+ * in QBMAN clock cycles in increments of 256.
+ */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
@@@ -138,6 -167,7 +167,7 @@@
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
@@@ -155,6 -185,12 +185,12 @@@
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
return obj;
}
@@@ -194,6 -230,8 +230,8 @@@ irqreturn_t dpaa2_io_irq(struct dpaa2_i
struct qbman_swp *swp;
u32 status;
+ obj->event_ctr++;
+
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
@@@ -462,7 -500,7 +500,7 @@@ int dpaa2_io_service_enqueue_multiple_f
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
@@@ -779,3 -817,82 +817,82 @@@ int dpaa2_io_query_bp_count(struct dpaa
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+ /**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+ int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+ {
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+ }
+ EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+ /**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+ void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+ {
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+ }
+ EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+ /**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+ void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+ {
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+ }
+ EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+ /**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+ int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+ {
+ return d->swp->use_adaptive_rx_coalesce;
+ }
+ EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+ /**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+ void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+ {
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+ }
+ EXPORT_SYMBOL(dpaa2_io_update_net_dim);
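[Per the kernel-doc above, a consumer accumulates the frame and byte counts it dequeued during one poll and reports them once via dpaa2_io_update_net_dim(); the work item installed in dpaa2_io_create() then turns DIM's chosen profile into a new holdoff through dpaa2_io_set_irq_coalescing(). A hedged sketch of such a caller, built on the existing dpaa2 store/FD accessors; the surrounding driver structure is invented.

/* Illustrative poll loop feeding Net DIM; not a real driver's code. */
static int example_poll(struct dpaa2_io *io, struct dpaa2_io_store *store)
{
        u64 frames = 0, bytes = 0;
        struct dpaa2_dq *dq;
        int is_last = 0;

        do {
                dq = dpaa2_io_store_next(store, &is_last);
                if (!dq)        /* entry not yet valid, keep polling */
                        continue;
                frames++;
                bytes += dpaa2_fd_get_len(dpaa2_dq_fd(dq));
        } while (!is_last);

        dpaa2_io_update_net_dim(io, frames, bytes);
        return (int)frames;
}
]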
diff --combined drivers/soc/fsl/dpio/qbman-portal.c
index c4282cc7b391,3474bf5f88d5..1397ec21873a
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@@ -29,6 -29,7 +29,7 @@@
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
+ #define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
@@@ -38,6 -39,7 +39,7 @@@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+ #define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@@ -355,6 -357,9 +357,9 @@@ struct qbman_swp *qbman_swp_init(const
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with an irq timeout period of 0us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
return p;
}
@@@ -674,9 -679,9 +679,9 @@@ int qbman_swp_enqueue_multiple_direct(s
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -688,9 -693,9 +693,9 @@@
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@@ -732,7 -737,8 +737,7 @@@ int qbman_swp_enqueue_multiple_mem_back
int i, num_enqueued = 0;
unsigned long irq_flags;
- spin_lock(&s->access_spinlock);
- local_irq_save(irq_flags);
+ spin_lock_irqsave(&s->access_spinlock, irq_flags);
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
@@@ -743,7 -749,8 +748,7 @@@
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available) {
- local_irq_restore(irq_flags);
- spin_unlock(&s->access_spinlock);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
return 0;
}
}
@@@ -756,9 -763,9 +761,9 @@@
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -768,9 -775,9 +773,9 @@@
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@@ -782,7 -789,8 +787,7 @@@
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
- local_irq_restore(irq_flags);
- spin_unlock(&s->access_spinlock);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
return num_enqueued;
}
@@@ -829,9 -837,9 +834,9 @@@ int qbman_swp_enqueue_multiple_desc_dir
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -899,9 -907,9 +904,9 @@@ int qbman_swp_enqueue_multiple_desc_mem
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -1793,3 -1801,56 +1798,56 @@@ u32 qbman_bp_info_num_free_bufs(struct
{
return le32_to_cpu(a->fill);
}
+
+ /**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+ int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+ {
+ u32 itp, max_holdoff;
+
+ /* Convert irq_holdoff value from usecs to increments of 256 QBMAN
+ * clock cycles. This depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be <= %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+ }
+
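[The conversion above is easy to sanity-check numerically. Assuming a 600 MHz QBMAN clock (an illustrative value; the real one comes from the DPIO descriptor), 256 cycles take 256000/600 = 426 ns, so a 100 us holdoff programs ITP = 100000/426 = 234, and the 4096-increment ceiling works out to roughly 1.7 ms:

#include <stdio.h>

int main(void)
{
        unsigned int qman_clk = 600000000;      /* assumed 600 MHz */
        /* mirrors qman_256_cycles_per_ns in the dpio code */
        unsigned int ns_per_256_cycles = 256000 / (qman_clk / 1000000);
        unsigned int irq_holdoff_us = 100;

        unsigned int itp = (irq_holdoff_us * 1000) / ns_per_256_cycles;
        unsigned int max_holdoff_us = (ns_per_256_cycles * 4096) / 1000;

        printf("itp=%u max_holdoff=%uus\n", itp, max_holdoff_us); /* 234, 1744us */
        return 0;
}
]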
+ /**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+ void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+ {
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+ }
diff --combined include/linux/bpf.h
index 3db6f6c95489,1c7fd7c4c6d3..e6f5579f9356
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@@ -48,6 -48,7 +48,7 @@@ extern struct idr btf_idr
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
+ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@@ -142,7 -143,8 +143,8 @@@ struct bpf_map_ops
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
- int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ int (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */
@@@ -929,11 -931,8 +931,11 @@@ struct bpf_array_aux
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
- enum bpf_prog_type type;
- bool jited;
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ } owner;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@@ -1092,6 -1091,7 +1094,7 @@@ bool bpf_prog_array_compatible(struct b
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@@ -2220,6 -2220,8 +2223,8 @@@ int bpf_arch_text_poke(void *ip, enum b
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+ #define MAX_BPRINTF_VARARGS 12
+
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
diff --combined include/linux/filter.h
index ef03ff34234d,47f80adbe744..8231a6a257f6
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@@ -360,10 -360,9 +360,9 @@@ static inline bool insn_is_zext(const s
.off = 0, \
.imm = TGT })
- /* Function call */
+ /* Convert function address to BPF immediate */
- #define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+ #define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@@ -371,7 -370,7 +370,7 @@@
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
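[BPF_CALL_IMM() makes the relationship explicit: a call instruction's immediate is the target's address relative to __bpf_call_base, which the JIT later resolves back to an absolute address, and BPF_EMIT_CALL() is now defined in terms of it. A hedged sketch of how fixup code might use the macro directly; the choice of helper is illustrative.

/* Illustrative only: point an existing call insn at a helper. */
static void example_patch_call(struct bpf_insn *insn)
{
        insn->imm = BPF_CALL_IMM(bpf_map_lookup_elem);
        /* equivalent to:
         * insn->imm = (void *)bpf_map_lookup_elem - (void *)__bpf_call_base;
         */
}
]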
/* Raw code statement block */
@@@ -1051,7 -1050,6 +1050,7 @@@ extern int bpf_jit_enable
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --combined include/linux/mlx5/device.h
index ed0230ff9422,56bcf95d4ab7..9c25edfd59a6
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@@ -290,6 -290,7 +290,7 @@@ enum
MLX5_UMR_INLINE = (1 << 7),
};
+ #define MLX5_UMR_KLM_ALIGNMENT 4
#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
@@@ -541,19 -542,21 +542,21 @@@ struct mlx5_cmd_layout
u8 status_own;
};
- enum mlx5_fatal_assert_bit_offsets {
- MLX5_RFR_OFFSET = 31,
+ enum mlx5_rfr_severity_bit_offsets {
+ MLX5_RFR_BIT_OFFSET = 0x7,
};
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rfr;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
@@@ -577,7 -580,9 +580,9 @@@ struct mlx5_init_seg
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
@@@ -795,10 -800,23 +800,23 @@@ struct mlx5_cqe64
u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@@ -868,7 -886,7 +886,7 @@@ static inline u8 get_cqe_opcode(struct
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
@@@ -1182,7 -1200,9 +1200,9 @@@ enum mlx5_cap_type
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@@ -1340,6 -1360,20 +1360,20 @@@ enum mlx5_qcam_feature_groups
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+ #define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+ #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+ #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+ #define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
@@@ -1412,6 -1446,9 +1446,9 @@@
#define MLX5_CAP_IPSEC(mdev, cap)\
MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+ #define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
+ MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@@ -1456,8 -1493,6 +1493,8 @@@ static inline u16 mlx5_to_sw_pkey_sz(in
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
diff --combined include/linux/mlx5/driver.h
index 5cc19b9483b9,f617dfbcd9fd..a623ec635947
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@@ -59,15 -59,13 +59,13 @@@
#define MLX5_ADEV_NAME "mlx5_core"
+ #define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@@ -136,6 -134,7 +134,7 @@@ enum
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
@@@ -154,6 -153,7 +153,7 @@@
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
};
enum mlx5_qpts_trust_state {
@@@ -357,6 -357,22 +357,6 @@@ struct mlx5_core_sig_ctx
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
- MLX5_MKEY_INDIRECT_DEVX,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
- struct wait_queue_head wait;
- refcount_t usecount;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@@ -425,6 -441,7 +425,7 @@@ struct mlx5_core_health
struct work_struct report_work;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
struct mlx5_qp_table {
@@@ -637,7 -654,7 +638,7 @@@ struct mlx5e_resources
struct mlx5e_hw_objs {
u32 pdn;
struct mlx5_td td;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5_sq_bfreg bfreg;
} hw_objs;
struct devlink_port dl_port;
@@@ -736,6 -753,7 +737,7 @@@ struct mlx5_core_dev
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@@ -989,6 -1007,8 +991,6 @@@ void mlx5_cmd_mbox_status(void *out, u
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@@ -1006,11 -1026,13 +1008,11 @@@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
@@@ -1222,6 -1244,16 +1224,16 @@@ static inline int mlx5_core_native_port
return MLX5_CAP_GEN(dev, native_port_num);
}
+ static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+ {
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+ }
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
@@@ -1230,11 -1262,12 +1242,12 @@@ static inline bool mlx5_is_roce_init_en
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
+ int err;
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
}
#endif /* MLX5_DRIVER_H */
diff --combined include/linux/mlx5/fs.h
index f2c3da2006d9,7a43fec63a35..90c548aa6389
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@@ -83,8 -83,7 +83,9 @@@ enum mlx5_flow_namespace_type
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
};
enum {
@@@ -99,6 -98,7 +100,7 @@@
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+ struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@@ -259,6 -259,13 +261,13 @@@ struct mlx5_modify_hdr *mlx5_modify_hea
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+ struct mlx5_flow_definer *
+ mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+ int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
struct mlx5_pkt_reformat_params {
int type;
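A hedged usage sketch for the new match definer API (the 16-dword mask and the format ids come from the mlx5_ifc hunks later in this mail; the ERR_PTR return convention is an assumption of this sketch):

	u32 match_mask[16] = {};	/* mirrors match_definer.match_mask[16][0x20] */
	struct mlx5_flow_definer *definer;
	int definer_id;

	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					    22, match_mask);
	if (IS_ERR(definer))
		return PTR_ERR(definer);

	definer_id = mlx5_get_match_definer_id(definer);
	/* definer_id feeds the new match_definer_id field of create_flow_group */
	mlx5_destroy_match_definer(dev, definer);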
diff --combined include/linux/mlx5/mlx5_ifc.h
index 9011c4495867,0bb78c04336c..3636df90899a
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@@ -94,6 -94,7 +94,7 @@@ enum
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@@ -342,7 -343,7 +343,7 @@@ struct mlx5_ifc_flow_table_fields_suppo
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
- u8 reserved_at_1e[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@@ -393,14 -394,6 +394,14 @@@
u8 metadata_reg_c_0[0x1];
};
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 reserved_at_0[0xe];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x11];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 reserved_at_1[0x1];
@@@ -547,7 -540,7 +548,7 @@@ struct mlx5_ifc_fte_match_set_misc_bit
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7];
@@@ -764,15 -757,7 +765,15 @@@ struct mlx5_ifc_flow_table_nic_cap_bit
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200];
+ u8 reserved_at_e00[0x700];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@@ -783,6 -768,18 +784,18 @@@
u8 reserved_at_20c0[0x5f40];
};
+ struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+ };
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@@ -1322,7 -1319,8 +1335,8 @@@ struct mlx5_ifc_cmd_hca_cap_bits
u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1];
- u8 reserved_at_21[0x2];
+ u8 reserved_at_21[0x1];
+ u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@@@ -1352,7 -1350,9 +1366,9 @@@
u8 reserved_at_b0[0x1];
u8 uplink_follow[0x1];
u8 ts_cqe_to_dest_cqn[0x1];
- u8 reserved_at_b3[0xd];
+ u8 reserved_at_b3[0x7];
+ u8 shampo[0x1];
+ u8 reserved_at_bb[0x5];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@@ -1530,7 -1530,8 +1546,8 @@@
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_248[0x2];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_248[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
@@@ -1603,7 -1604,8 +1620,8 @@@
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 reserved_at_3a2[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@@ -1732,7 -1734,7 +1750,7 @@@
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4];
@@@ -1747,7 -1749,7 +1765,7 @@@
u8 reserved_at_760[0x20];
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
@@@ -1766,6 -1768,7 +1784,7 @@@ enum mlx5_flow_destination_type
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
@@@ -1892,7 -1895,21 +1911,21 @@@ struct mlx5_ifc_wq_bits
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 reserved_at_140[0x80];
+
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x4];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
struct mlx5_ifc_cmd_pas_bits pas[];
};
@@@ -2823,6 -2840,40 +2856,40 @@@ struct mlx5_ifc_dropped_packet_logged_b
u8 reserved_at_0[0xe0];
};
+ struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+ };
+
+ struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ u8 reserved_at_1c0[0x40];
+ };
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@@ -3134,6 -3185,20 +3201,20 @@@ struct mlx5_ifc_roce_addr_layout_bits
u8 reserved_at_e0[0x20];
};
+ struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 reserved_at_20[0x3];
+ u8 shampo_max_log_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+ };
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@@ -3144,6 -3209,7 +3225,7 @@@
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
@@@ -3151,6 -3217,7 +3233,7 @@@
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_shampo_cap_bits shampo_cap;
u8 reserved_at_0[0x8000];
};
@@@ -3325,8 -3392,9 +3408,9 @@@ enum
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
+ MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2),
};
enum {
@@@ -3351,7 -3419,7 +3435,7 @@@ struct mlx5_ifc_tirc_bits
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
@@@ -3533,6 -3601,18 +3617,18 @@@ enum
MLX5_RQC_STATE_ERR = 0x3,
};
+ enum {
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+ };
+
+ enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
+ };
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@@ -3565,7 -3645,13 +3661,13 @@@
u8 reserved_at_c0[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0];
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
@@@ -4113,13 -4199,19 +4215,19 @@@ struct mlx5_ifc_health_buffer_bits
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@@ -5632,6 -5724,236 +5740,236 @@@ struct mlx5_ifc_query_fte_in_bits
u8 reserved_at_120[0xe0];
};
+ struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+ };
+
+ struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+ };
+
+ struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+ };
+
+ struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x160];
+
+ u8 match_mask[16][0x20];
+ };
+
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+ };
+
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+ };
+
+ struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+ };
+
+ struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ };
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@@ -6385,7 -6707,7 +6723,7 @@@ struct mlx5_ifc_modify_tir_bitmask_bit
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@@ -7585,7 -7907,7 +7923,7 @@@ struct mlx5_ifc_dealloc_uar_out_bits
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@@ -8105,6 -8427,11 +8443,11 @@@ struct mlx5_ifc_create_flow_group_out_b
u8 reserved_at_60[0x20];
};
+ enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+ };
+
enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@@ -8126,7 -8453,9 +8469,9 @@@ struct mlx5_ifc_create_flow_group_in_bi
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@@ -8141,7 -8470,10 +8486,10 @@@
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@@ -8432,7 -8764,7 +8780,7 @@@ struct mlx5_ifc_alloc_uar_out_bits
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@@ -10076,6 -10408,17 +10424,17 @@@ struct mlx5_ifc_pddr_reg_bits
union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
};
+ struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+ };
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@@ -10137,6 -10480,7 +10496,7 @@@
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
u8 reserved_at_0[0x60e0];
};
@@@ -10414,9 -10758,16 +10774,16 @@@ struct mlx5_ifc_dcbx_param_bits
u8 reserved_at_a0[0x160];
};
+ enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT,
+ };
+
struct mlx5_ifc_lagc_bits {
u8 fdb_selection_mode[0x1];
- u8 reserved_at_1[0x1c];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
u8 reserved_at_20[0x14];
@@@ -10630,29 -10981,6 +10997,6 @@@ struct mlx5_ifc_dealloc_memic_out_bits
u8 reserved_at_40[0x40];
};
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
- };
-
- struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
- };
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
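The new HCA capability bits are read through the existing MLX5_CAP_GEN() accessor; a short sketch limited to fields added in the hunks above (mdev is assumed to be a live mlx5_core_dev):

	bool has_dtor = MLX5_CAP_GEN(mdev, dtor);	/* default timeouts register */
	bool has_shampo = MLX5_CAP_GEN(mdev, shampo);	/* HW GRO */
	u16 max_definers = MLX5_CAP_GEN(mdev, max_num_match_definer);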
diff --combined include/net/cfg80211.h
index 27336fc70467,7c9d5db4f0e6..423f97b982ff
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@@ -739,6 -739,22 +739,22 @@@ struct cfg80211_tid_config
struct cfg80211_tid_cfg tid_conf[];
};
+ /**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+ struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+ };
+
/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
@@@ -1040,6 -1056,36 +1056,36 @@@ struct cfg80211_crypto_settings
enum nl80211_sae_pwe_mechanism sae_pwe;
};
+ /**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+ struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+ };
+
+ /**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+ struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+ };
+
/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
@@@ -1058,6 -1104,7 +1104,7 @@@
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@@ -1075,6 -1122,7 +1122,7 @@@ struct cfg80211_beacon_data
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@@ -1189,6 -1237,7 +1237,7 @@@ enum cfg80211_ap_settings_flags
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@@ -1221,6 -1270,7 +1270,7 @@@
struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
/**
@@@ -4018,6 -4068,10 +4068,10 @@@ struct mgmt_frame_regs
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@@ -4348,6 -4402,8 +4402,8 @@@
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
};
/*
@@@ -4981,6 -5037,13 +5037,13 @@@ struct wiphy_iftype_akm_suites
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
*/
struct wiphy {
struct mutex mtx;
@@@ -5125,6 -5188,9 +5188,9 @@@
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@@ -5376,6 -5442,7 +5442,6 @@@ static inline void wiphy_unlock(struct
* netdev and may otherwise be used by driver read-only, will be updated
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers
@@@ -5422,6 -5489,7 +5488,6 @@@ struct wireless_dev
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
u8 mgmt_registrations_need_update:1;
struct mutex mtx;
@@@ -5490,7 -5558,7 +5556,7 @@@
unsigned long unprot_beacon_reported;
};
- static inline u8 *wdev_address(struct wireless_dev *wdev)
+ static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@@ -6308,6 -6376,17 +6374,17 @@@ static inline void cfg80211_gen_new_bss
u64_to_ether_addr(new_bssid_u64, new_bssid);
}
+ /**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
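A hedged sketch of wiring the new set_fils_aad op from a driver; the my_drv_* names are hypothetical and the device programming is elided:

	static int my_drv_set_fils_aad(struct wiphy *wiphy, struct net_device *dev,
				       struct cfg80211_fils_aad *fils_aad)
	{
		/* hand kek (kek_len bytes), snonce and anonce to the device so
		 * it can decrypt (Re)Assoc Request and encrypt (Re)Assoc Response
		 */
		return 0;
	}

	static const struct cfg80211_ops my_drv_cfg_ops = {
		/* ... other ops ... */
		.set_fils_aad = my_drv_set_fils_aad,
	};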
diff --combined include/net/sock.h
index 463f390d90b3,ff4e62aa62e5..db95ceaf4d4c
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@@ -259,10 -259,11 +259,11 @@@ struct bpf_local_storage
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@@ -270,6 -271,7 +271,7 @@@
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@@ -329,7 -331,6 +331,6 @@@
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@@ -394,7 -395,6 +395,6 @@@ struct sock
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@@ -413,6 -413,7 +413,7 @@@
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@@ -431,6 -432,9 +432,9 @@@
struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@@ -443,7 -447,6 +447,6 @@@
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@@ -1208,7 -1211,7 +1211,7 @@@ struct proto
#endif
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
@@@ -1237,7 -1240,7 +1240,7 @@@
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@@ -1518,20 -1521,49 +1521,49 @@@ sk_rmem_schedule(struct sock *sk, struc
skb_pfmemalloc(skb);
}
+ static inline int sk_unused_reserved_mem(const struct sock *sk)
+ {
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+ }
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+ }
+
+ static inline void sk_mem_reclaim_final(struct sock *sk)
+ {
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@@ -1543,9 -1575,12 +1575,12 @@@
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@@ -1554,22 -1589,14 +1589,14 @@@
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ if (unlikely(reclaimable >= 1 << 21))
__sk_mem_reclaim(sk, 1 << 20);
}
- DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
__kfree_skb(skb);
}
@@@ -1889,10 -1916,8 +1916,8 @@@ static inline void sk_rx_queue_set(stru
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
@@@ -1900,15 -1925,19 +1925,19 @@@
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
static inline int sk_rx_queue_get(const struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
#endif
return -1;
@@@ -2388,13 -2417,11 +2417,11 @@@ static inline void sk_stream_moderate_s
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
@@@ -2608,7 -2635,6 +2635,6 @@@ static inline void skb_setup_tx_timesta
&skb_shinfo(skb)->tskey);
}
- DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@@ -2620,12 -2646,6 +2646,6 @@@
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@@ -2820,10 -2840,8 +2840,15 @@@ void sock_set_sndtimeo(struct sock *sk
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+ int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+ int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
++
#endif /* _SOCK_H */
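sk_reserved_mem is charged via the SO_RESERVE_MEM socket option added in the same series; a hedged userspace sketch (the amount is illustrative, and the option is expected to fail without memory cgroup accounting):

	int reserve = 1 << 20;	/* ask for ~1MB of non-reclaimable forward alloc */

	if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
		       &reserve, sizeof(reserve)) < 0)
		perror("SO_RESERVE_MEM");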
diff --combined include/net/tls.h
index 01d2e3744393,adab19a8aed7..07ff59e99cb3
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -66,7 -66,7 +66,7 @@@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
- /* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+ /* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@@ -74,6 -74,7 +74,7 @@@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+ #define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
@@@ -220,6 -221,8 +221,8 @@@ union tls_crypto_context
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@@ -375,7 -378,7 +378,7 @@@ void tls_sw_release_resources_rx(struc
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
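The SM4 union members pair with new uapi crypto_info structs; a hedged sketch of enabling SM4-GCM kTLS on the transmit side (field names and size macros mirror the AES-GCM variants in the uapi header; key material is illustrative):

	struct tls12_crypto_info_sm4_gcm ci = {
		.info.version = TLS_1_3_VERSION,	/* SM4 suites are TLS 1.3 (RFC 8998) */
		.info.cipher_type = TLS_CIPHER_SM4_GCM,
	};

	memcpy(ci.key, key, TLS_CIPHER_SM4_GCM_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_SM4_GCM_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_SM4_GCM_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);

	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));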
diff --combined kernel/bpf/arraymap.c
index 447def540544,5e1ccfae916b..c7a5be3bf8be
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@@ -645,7 -645,7 +645,7 @@@ static const struct bpf_iter_seq_info i
.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};
- static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags)
{
u32 i, key, num_elems = 0;
@@@ -668,9 -668,8 +668,8 @@@
val = array->value + array->elem_size * i;
num_elems++;
key = i;
- ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
- (u64)(long)&key, (u64)(long)val,
- (u64)(long)callback_ctx, 0);
+ ret = callback_fn((u64)(long)map, (u64)(long)&key,
+ (u64)(long)val, (u64)(long)callback_ctx, 0);
/* return value: 0 - continue, 1 - stop and return */
if (ret)
break;
@@@ -1072,7 -1071,6 +1071,7 @@@ static struct bpf_map *prog_array_map_a
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex);
+ spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr);
if (IS_ERR(map)) {
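The bpf_callback_t change is kernel-internal typing only; from BPF program C the helper keeps its shape. A hedged sketch (my_array is a hypothetical ARRAY map):

	static long count_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
	{
		(*(__u32 *)ctx)++;
		return 0;	/* 0 = continue, nonzero = stop, as in the loop above */
	}

	__u32 n = 0;

	bpf_for_each_map_elem(&my_array, count_cb, &n, 0);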
diff --combined kernel/bpf/core.c
index 6e3ae90ad107,ea8a468dbded..ded9163185d1
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@@ -524,7 -524,6 +524,7 @@@ int bpf_jit_enable __read_mostly = IS
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
+long bpf_jit_limit_max __read_mostly;
static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
@@@ -818,8 -817,7 +818,8 @@@ u64 __weak bpf_jit_alloc_exec_limit(voi
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
PAGE_SIZE), LONG_MAX);
return 0;
}
@@@ -1823,26 -1821,20 +1823,26 @@@ static unsigned int __bpf_prog_ret0_war
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
+ bool ret;
+
if (fp->kprobe_override)
return false;
- if (!array->aux->type) {
+ spin_lock(&array->aux->owner.lock);
+
+ if (!array->aux->owner.type) {
/* There's no owner yet where we could check for
* compatibility.
*/
- array->aux->type = fp->type;
- array->aux->jited = fp->jited;
- return true;
+ array->aux->owner.type = fp->type;
+ array->aux->owner.jited = fp->jited;
+ ret = true;
+ } else {
+ ret = array->aux->owner.type == fp->type &&
+ array->aux->owner.jited == fp->jited;
}
-
- return array->aux->type == fp->type &&
- array->aux->jited == fp->jited;
+ spin_unlock(&array->aux->owner.lock);
+ return ret;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@@ -2365,6 -2357,11 +2365,11 @@@ const struct bpf_func_proto * __weak bp
return NULL;
}
+ const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
+ {
+ return NULL;
+ }
+
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 17687848daec,7242b32fff80..2ed9496fc41f
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -254,7 -254,7 +254,7 @@@ batadv_claim_hash_find(struct batadv_pr
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
- batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+ batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@@ -336,7 -336,7 +336,7 @@@ batadv_bla_del_backbone_claims(struct b
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
- static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
@@@ -488,7 -488,7 +488,7 @@@ static void batadv_bla_loopdetect_repor
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
- batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@@ -926,7 -926,7 +926,7 @@@ static bool batadv_handle_request(struc
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -964,7 -964,7 +964,7 @@@
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -1560,14 -1560,10 +1560,14 @@@ int batadv_bla_init(struct batadv_priv
return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128);
- bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.claim_hash)
+ return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.backbone_hash) {
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
+ }
batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
@@@ -2130,7 -2126,7 +2130,7 @@@ batadv_bla_claim_dump_entry(struct sk_b
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
@@@ -2298,7 -2294,7 +2298,7 @@@ batadv_bla_backbone_dump_entry(struct s
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
diff --combined net/core/dev.c
index eb3a366bf212,e8754560e641..edeb811c454e
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -140,7 -140,7 +140,7 @@@
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
- #include <linux/netfilter_ingress.h>
+ #include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
@@@ -303,6 -303,12 +303,12 @@@ static struct netdev_name_node *netdev_
return NULL;
}
+ bool netdev_name_in_use(struct net *net, const char *name)
+ {
+ return netdev_name_node_lookup(net, name);
+ }
+ EXPORT_SYMBOL(netdev_name_in_use);
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@@ -1133,7 -1139,7 +1139,7 @@@ static int __dev_alloc_name(struct net
}
snprintf(buf, IFNAMSIZ, name, i);
- if (!__dev_get_by_name(net, buf))
+ if (!netdev_name_in_use(net, buf))
return i;
/* It is possible to run out of possible slots
@@@ -1187,7 -1193,7 +1193,7 @@@ static int dev_get_valid_name(struct ne
if (strchr(name, '%'))
return dev_alloc_name_ns(net, dev, name);
- else if (__dev_get_by_name(net, name))
+ else if (netdev_name_in_use(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
@@@ -1290,8 -1296,8 +1296,8 @@@ rollback
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
- pr_err("%s: name change rollback failed: %d\n",
- dev->name, ret);
+ netdev_err(dev, "name change rollback failed: %d\n",
+ ret);
}
}
@@@ -2345,7 -2351,7 +2351,7 @@@ static void netif_setup_tc(struct net_d
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
+ netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
@@@ -2356,8 -2362,8 +2362,8 @@@
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
- i, q);
+ netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+ i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
@@@ -2921,6 -2927,8 +2927,8 @@@ int netif_set_real_num_tx_queues(struc
if (dev->num_tc)
netif_setup_tc(dev, txq);
+ dev_qdisc_change_real_num_tx(dev, txq);
+
dev->real_num_tx_queues = txq;
if (disabling) {
@@@ -3163,12 -3171,6 +3171,12 @@@ static u16 skb_tx_hash(const struct net
qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
+ if (unlikely(!qcount)) {
+ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
+ sb_dev->name, qoffset, tc);
+ qoffset = 0;
+ qcount = dev->real_num_tx_queues;
+ }
}
if (skb_rx_queue_recorded(skb)) {
@@@ -3414,7 -3416,7 +3422,7 @@@ EXPORT_SYMBOL(__skb_gso_segment)
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
- pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ netdev_err(dev, "hw csum failure\n");
skb_dump(KERN_ERR, skb, true);
dump_stack();
}
@@@ -3912,8 -3914,7 +3920,8 @@@ int dev_loopback_xmit(struct net *net,
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->pkt_type = PACKET_LOOPBACK;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(skb));
skb_dst_force(skb);
netif_rx_ni(skb);
@@@ -3925,6 -3926,7 +3933,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit)
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
+ #ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
struct tcf_result cl_res;
@@@ -3960,6 -3962,7 +3969,7 @@@
default:
break;
}
+ #endif /* CONFIG_NET_CLS_ACT */
return skb;
}
@@@ -4153,13 -4156,20 +4163,20 @@@ static int __dev_queue_xmit(struct sk_b
qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_at_ingress = 0;
- # ifdef CONFIG_NET_EGRESS
+ #endif
+ #ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
+ if (nf_hook_egress_active()) {
+ skb = nf_hook_egress(skb, &rc, dev);
+ if (!skb)
+ goto out;
+ }
+ nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
}
- # endif
#endif
/* If device/qdisc don't need skb->dst, release it right now while
* its hot in this cpu cache.
@@@ -5301,6 -5311,7 +5318,7 @@@ skip_taps
if (static_branch_unlikely(&ingress_needed_key)) {
bool another = false;
+ nf_skip_egress(skb, true);
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
&another);
if (another)
@@@ -5308,6 -5319,7 +5326,7 @@@
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
goto out;
}
@@@ -5844,7 -5856,7 +5863,7 @@@ static void gro_normal_one(struct napi_
gro_normal_list(napi);
}
- static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+ static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct packet_offload *ptype;
__be16 type = skb->protocol;
@@@ -5873,12 -5885,11 +5892,11 @@@
if (err) {
WARN_ON(&ptype->list == head);
kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return;
}
out:
gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
- return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@@ -6905,19 -6916,25 +6923,25 @@@ EXPORT_SYMBOL(netif_napi_add)
void napi_disable(struct napi_struct *n)
{
+ unsigned long val, new;
+
might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
- msleep(1);
+ do {
+ val = READ_ONCE(n->state);
+ if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
+ usleep_range(20, 200);
+ continue;
+ }
+
+ new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
+ new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
+ } while (cmpxchg(&n->state, val, new) != val);
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
clear_bit(NAPI_STATE_DISABLE, &n->state);
- clear_bit(NAPI_STATE_THREADED, &n->state);
}
EXPORT_SYMBOL(napi_disable);
@@@ -6995,8 -7012,8 +7019,8 @@@ static int __napi_poll(struct napi_stru
}
if (unlikely(work > weight))
- pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
- n->poll, work, weight);
+ netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+ n->poll, work, weight);
if (likely(work < weight))
return work;
@@@ -8550,8 -8567,7 +8574,7 @@@ static int __dev_set_promiscuity(struc
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
- pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -8621,8 -8637,7 +8644,7 @@@ static int __dev_set_allmulti(struct ne
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
- pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -9159,14 -9174,11 +9181,11 @@@ int dev_get_port_parent_id(struct net_d
}
err = devlink_compat_switch_id_get(dev, ppid);
- if (!err || err != -EOPNOTSUPP)
+ if (!recurse || err != -EOPNOTSUPP)
return err;
- if (!recurse)
- return -EOPNOTSUPP;
-
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ err = dev_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
@@@ -9910,6 -9922,11 +9929,11 @@@ static netdev_features_t netdev_fix_fea
}
}
+ if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
+ netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
+ features &= ~NETIF_F_LRO;
+ }
+
if (features & NETIF_F_HW_TLS_TX) {
bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@@ -10867,7 -10884,7 +10891,7 @@@ struct net_device *alloc_netdev_mqs(in
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev);
+ nf_hook_netdev_init(dev);
return dev;
@@@ -11153,7 -11170,7 +11177,7 @@@ int __dev_change_net_namespace(struct n
* we can use it in the destination network namespace.
*/
err = -EEXIST;
- if (__dev_get_by_name(net, dev->name)) {
+ if (netdev_name_in_use(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
@@@ -11506,7 -11523,7 +11530,7 @@@ static void __net_exit default_device_e
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
- if (__dev_get_by_name(&init_net, fb_name))
+ if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
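netdev_name_in_use() is exported above, so drivers can run the same existence check that __dev_get_by_name callers used to open-code; a hedged fragment (wanted_name is illustrative, and RTNL is assumed held as for __dev_get_by_name):

	ASSERT_RTNL();
	if (netdev_name_in_use(net, wanted_name))
		return -EEXIST;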
diff --combined net/core/net-sysfs.c
index b2e49eb7001d,d6e4e0b43beb..9c01c642cf9e
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@@ -175,6 -175,14 +175,14 @@@ static int change_carrier(struct net_de
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_carrier; this helps returning early
+ * without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_carrier)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_carrier);
}
@@@ -196,6 -204,12 +204,12 @@@ static ssize_t speed_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -216,6 -230,12 +230,12 @@@ static ssize_t duplex_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -468,6 -488,14 +488,14 @@@ static ssize_t proto_down_store(struct
struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_proto_down; this helps returning
+ * early without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@@ -478,6 -506,12 +506,12 @@@ static ssize_t phys_port_id_show(struc
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this helps returning
+ * early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -500,6 -534,13 +534,13 @@@ static ssize_t phys_port_name_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -522,6 -563,14 +563,14 @@@ static ssize_t phys_switch_id_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_port_parent_id; this helps
+ * returning early without hitting the trylock/restart below. This works
+ * because recurse is false when calling dev_get_port_parent_id.
+ */
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -1226,6 -1275,12 +1275,12 @@@ static ssize_t tx_maxrate_store(struct
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ /* The check is also done later; this helps returning early without
+ * hitting the trylock/restart below.
+ */
+ if (!dev->netdev_ops->ndo_set_tx_maxrate)
+ return -EOPNOTSUPP;
+
err = kstrtou32(buf, 10, &rate);
if (err < 0)
return err;
@@@ -1869,7 -1924,7 +1924,7 @@@ static struct class net_class __ro_afte
.get_ownership = net_get_ownership,
};
- #ifdef CONFIG_OF_NET
+ #ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
for (; dev; dev = dev->parent) {
@@@ -1973,9 -2028,9 +2028,9 @@@ int netdev_register_kobject(struct net_
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
const struct net *net_new)
{
+ kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
+ kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
struct device *dev = &ndev->dev;
- kuid_t old_uid, new_uid;
- kgid_t old_gid, new_gid;
int error;
net_ns_get_ownership(net_old, &old_uid, &old_gid);
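The pattern repeated through this file, distilled as a hedged template (ndo_foo is a placeholder): probe the op pointer before rtnl_trylock() so a doomed show/store cannot spin in restart_syscall():

	if (!netdev->netdev_ops->ndo_foo)	/* placeholder op */
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();
	/* the op may still fail; the early check only avoids the retry loop */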
diff --combined net/core/skbuff.c
index fe9358437380,74601bbc56ac..09b8cf8ab234
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -80,7 -80,6 +80,7 @@@
#include <linux/indirect_call_wrapper.h>
#include "datagram.h"
+#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@@ -135,34 -134,31 +135,31 @@@ struct napi_alloc_cache
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
- static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
+ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
- }
-
- void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
- {
fragsz = SKB_DATA_ALIGN(fragsz);
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
- struct page_frag_cache *nc;
void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
- nc = this_cpu_ptr(&netdev_alloc_cache);
+ struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
} else {
+ struct napi_alloc_cache *nc;
+
local_bh_disable();
- data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ nc = this_cpu_ptr(&napi_alloc_cache);
+ data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
local_bh_enable();
}
return data;
@@@ -398,8 -394,9 +395,9 @@@ struct sk_buff *__alloc_skb(unsigned in
{
struct kmem_cache *cache;
struct sk_buff *skb;
- u8 *data;
+ unsigned int osize;
bool pfmemalloc;
+ u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
@@@ -431,7 -428,8 +429,8 @@@
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
- size = SKB_WITH_OVERHEAD(ksize(data));
+ osize = ksize(data);
+ size = SKB_WITH_OVERHEAD(osize);
prefetchw(data + size);
/*
@@@ -440,7 -438,7 +439,7 @@@
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- __build_skb_around(skb, data, 0);
+ __build_skb_around(skb, data, osize);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) {
@@@ -1805,39 -1803,30 +1804,39 @@@ EXPORT_SYMBOL(skb_realloc_headroom)
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
int delta = headroom - skb_headroom(skb);
+ int osize = skb_end_offset(skb);
+ struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0,
"%s is expecting an increase in the headroom", __func__))
return skb;
- /* pskb_expand_head() might crash, if skb is shared */
- if (skb_shared(skb)) {
+ delta = SKB_DATA_ALIGN(delta);
+ /* pskb_expand_head() might crash, if skb is shared. */
+ if (skb_shared(skb) || !is_skb_wmem(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb)) {
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
- consume_skb(skb);
- } else {
- kfree_skb(skb);
- }
+ if (unlikely(!nskb))
+ goto fail;
+
+ if (sk)
+ skb_set_owner_w(nskb, sk);
+ consume_skb(skb);
skb = nskb;
}
- if (skb &&
- pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(skb);
- skb = NULL;
+ if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
+ goto fail;
+
+ if (sk && is_skb_wmem(skb)) {
+ delta = skb_end_offset(skb) - osize;
+ refcount_add(delta, &sk->sk_wmem_alloc);
+ skb->truesize += delta;
}
return skb;
+
+fail:
+ kfree_skb(skb);
+ return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
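One caller-visible point in the rework above: on failure the original skb has already been freed. A hedged fragment of the whole contract (needed_headroom is illustrative):

	skb = skb_expand_head(skb, needed_headroom);
	if (!skb)
		return -ENOMEM;	/* skb was consumed by skb_expand_head() */
	/* on success, truesize and sk_wmem_alloc were adjusted for wmem skbs */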
diff --combined net/ipv4/tcp.c
index f5c336f8b0c8,d0b848ff5c0f..c262cb3e4f35
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@@ -287,8 -287,8 +287,8 @@@ enum
TCP_CMSG_TS = 2
};
- struct percpu_counter tcp_orphan_count;
- EXPORT_SYMBOL_GPL(tcp_orphan_count);
+ DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
+ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
@@@ -325,11 -325,6 +325,6 @@@ struct tcp_splice_state
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
- DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
- EXPORT_SYMBOL(tcp_rx_skb_cache_key);
-
- DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-
void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
@@@ -486,7 -481,10 +481,7 @@@ static bool tcp_stream_is_readable(stru
{
if (tcp_epollin_ready(sk, target))
return true;
-
- if (sk->sk_prot->stream_memory_read)
- return sk->sk_prot->stream_memory_read(sk);
- return false;
+ return sk_is_readable(sk);
}
/*
@@@ -644,7 -642,7 +639,7 @@@ int tcp_ioctl(struct sock *sk, int cmd
}
EXPORT_SYMBOL(tcp_ioctl);
- static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
+ void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
@@@ -655,7 -653,7 +650,7 @@@ static inline bool forced_push(const st
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
- static void skb_entail(struct sock *sk, struct sk_buff *skb)
+ void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@@ -858,30 -856,15 +853,15 @@@ ssize_t tcp_splice_read(struct socket *
}
EXPORT_SYMBOL(tcp_splice_read);
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule)
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule)
{
struct sk_buff *skb;
- if (likely(!size)) {
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- sk->sk_tx_skb_cache = NULL;
- pskb_trim(skb, 0);
- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
- skb_shinfo(skb)->tx_flags = 0;
- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
- return skb;
- }
- }
- /* The TCP header must be at least 32-bit aligned. */
- size = ALIGN(size, 4);
-
if (unlikely(tcp_under_memory_pressure(sk)))
sk_mem_reclaim_partial(sk);
- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+ skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
if (likely(skb)) {
bool mem_scheduled;
@@@ -892,7 -875,7 +872,7 @@@
mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
}
if (likely(mem_scheduled)) {
- skb_reserve(skb, sk->sk_prot->max_header);
+ skb_reserve(skb, MAX_TCP_HEADER);
/*
* Make sure that we have exactly size bytes
* available to the caller, no more, no less.
@@@ -952,7 -935,7 +932,7 @@@ int tcp_send_mss(struct sock *sk, int *
*/
void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
{
- if (skb && !skb->len) {
+ if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
tcp_unlink_write_queue(skb, sk);
if (tcp_write_queue_empty(sk))
tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
@@@ -960,8 -943,8 +940,8 @@@
}
}
- struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size)
+ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ struct page *page, int offset, size_t *size)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@@ -974,15 -957,15 +954,15 @@@ new_segment
if (!sk_stream_memory_free(sk))
return NULL;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- tcp_rtx_and_write_queues_empty(sk));
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ tcp_rtx_and_write_queues_empty(sk));
if (!skb)
return NULL;
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
}
@@@ -1303,15 -1286,15 +1283,15 @@@ new_segment
goto restart;
}
first_skb = tcp_rtx_and_write_queues_empty(sk);
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- first_skb);
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ first_skb);
if (!skb)
goto wait_for_space;
process_backlog++;
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
/* All packets are restored as if they have
@@@ -2687,11 -2670,36 +2667,36 @@@ void tcp_shutdown(struct sock *sk, int
}
EXPORT_SYMBOL(tcp_shutdown);
+ int tcp_orphan_count_sum(void)
+ {
+ int i, total = 0;
+
+ for_each_possible_cpu(i)
+ total += per_cpu(tcp_orphan_count, i);
+
+ return max(total, 0);
+ }
+
+ static int tcp_orphan_cache;
+ static struct timer_list tcp_orphan_timer;
+ #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
+
+ static void tcp_orphan_update(struct timer_list *unused)
+ {
+ WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+ }
+
+ static bool tcp_too_many_orphans(int shift)
+ {
+ return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
+ }
+
bool tcp_check_oom(struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift);
+ too_many_orphans = tcp_too_many_orphans(shift);
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans)
@@@ -2800,7 -2808,7 +2805,7 @@@ adjudge_to_death
/* remove backlog if any, without releasing ownership. */
__release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(tcp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@@ -2917,11 -2925,6 +2922,6 @@@ void tcp_write_queue_purge(struct sock
sk_wmem_free_skb(sk, skb);
}
tcp_rtx_queue_purge(sk);
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- __kfree_skb(skb);
- sk->sk_tx_skb_cache = NULL;
- }
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
@@@ -2958,10 -2961,6 +2958,6 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- if (sk->sk_rx_skb_cache) {
- __kfree_skb(sk->sk_rx_skb_cache);
- sk->sk_rx_skb_cache = NULL;
- }
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
@@@ -4502,7 -4501,10 +4498,10 @@@ void __init tcp_init(void
sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
- percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
+
+ timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+
inet_hashinfo_init(&tcp_hashinfo);
inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
thash_entries, 21, /* one slot per 2 MB*/
diff --combined net/mac80211/mesh.c
index 5dcfd53a4ab6,a4212a333d61..15ac08d111ea
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@@ -672,7 -672,7 +672,7 @@@ ieee80211_mesh_update_bss_params(struc
u8 *ie, u8 ie_len)
{
struct ieee80211_supported_band *sband;
- const u8 *cap;
+ const struct element *cap;
const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata);
@@@ -687,10 -687,9 +687,10 @@@
sdata->vif.bss_conf.he_support = true;
- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
- if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
- he_oper = (void *)(cap + 3);
+ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+ if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
+ cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
+ he_oper = (void *)(cap->data + 1);
if (he_oper)
sdata->vif.bss_conf.he_oper.params =
@@@ -1247,7 -1246,7 +1247,7 @@@ ieee80211_mesh_rx_probe_req(struct ieee
struct sk_buff *presp;
struct beacon_data *bcn;
struct ieee80211_mgmt *hdr;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
size_t baselen;
u8 *pos;
@@@ -1256,22 -1255,24 +1256,24 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid,
- NULL);
-
- if (!elems.mesh_id)
+ elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid,
+ NULL);
+ if (!elems)
return;
+ if (!elems->mesh_id)
+ goto free;
+
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
- elems.ssid_len != 0)
- return;
+ elems->ssid_len != 0)
+ goto free;
- if (elems.mesh_id_len != 0 &&
- (elems.mesh_id_len != ifmsh->mesh_id_len ||
- memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
- return;
+ if (elems->mesh_id_len != 0 &&
+ (elems->mesh_id_len != ifmsh->mesh_id_len ||
+ memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
+ goto free;
rcu_read_lock();
bcn = rcu_dereference(ifmsh->beacon);
@@@ -1295,6 -1296,8 +1297,8 @@@
ieee80211_tx_skb(sdata, presp);
out:
rcu_read_unlock();
+ free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@@ -1305,7 -1308,7 +1309,7 @@@
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
@@@ -1320,42 -1323,47 +1324,47 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems, mgmt->bssid, NULL);
+ elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable,
+ len - baselen,
+ false, mgmt->bssid, NULL);
+ if (!elems)
+ return;
/* ignore non-mesh or secure / unsecure mismatch */
- if ((!elems.mesh_id || !elems.mesh_config) ||
- (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
- (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
- return;
+ if ((!elems->mesh_id || !elems->mesh_config) ||
+ (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
+ (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
+ goto free;
- if (elems.ds_params)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
+ if (elems->ds_params)
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0], band);
else
freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return;
+ goto free;
- if (mesh_matches_local(sdata, &elems)) {
+ if (mesh_matches_local(sdata, elems)) {
mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
if (!sdata->u.mesh.user_mpm ||
sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
- mesh_neighbour_update(sdata, mgmt->sa, &elems,
+ mesh_neighbour_update(sdata, mgmt->sa, elems,
rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
!sdata->vif.csa_active)
- ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+ ieee80211_mesh_process_chnswitch(sdata, elems, true);
}
if (ifmsh->sync_ops)
- ifmsh->sync_ops->rx_bcn_presp(sdata,
- stype, mgmt, &elems, rx_status);
+ ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len,
+ elems->mesh_config, rx_status);
+ free:
+ kfree(elems);
}
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
@@@ -1447,7 -1455,7 +1456,7 @@@ static void mesh_rx_csa_frame(struct ie
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
u16 pre_value;
bool fwd_csa = true;
size_t baselen;
@@@ -1460,33 -1468,37 +1469,37 @@@
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
- ieee802_11_parse_elems(pos, len - baselen, true, &elems,
- mgmt->bssid, NULL);
-
- if (!mesh_matches_local(sdata, &elems))
+ elems = ieee802_11_parse_elems(pos, len - baselen, true,
+ mgmt->bssid, NULL);
+ if (!elems)
return;
- ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ if (!mesh_matches_local(sdata, elems))
+ goto free;
+
+ ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
fwd_csa = false;
- pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
+ pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value);
if (ifmsh->pre_value >= pre_value)
- return;
+ goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.csa_active &&
- !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+ !ieee80211_mesh_process_chnswitch(sdata, elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
- return;
+ goto free;
}
/* forward or re-broadcast the CSA frame */
if (fwd_csa) {
- if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
+ if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0)
mcsa_dbg(sdata, "Failed to forward the CSA frame");
}
+ free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
diff --combined net/netfilter/ipvs/ip_vs_ctl.c
index 0ff94c66641f,e62b40bd349e..38ed88b89007
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@@ -48,8 -48,6 +48,8 @@@
#include <net/ip_vs.h>
+MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME);
+
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
static DEFINE_MUTEX(__ip_vs_mutex);
@@@ -2019,6 -2017,12 +2019,12 @@@ static struct ctl_table vs_vars[] =
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "run_estimation",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
@@@ -4092,6 -4096,8 +4098,8 @@@ static int __net_init ip_vs_control_net
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+ ipvs->sysctl_run_estimation = 1;
+ tbl[idx++].data = &ipvs->sysctl_run_estimation;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
diff --combined net/tls/tls_main.c
index 9ab81db8a654,278192ee133e..acfba9f1ba72
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@@ -421,6 -421,88 +421,88 @@@ static int do_tls_getsockopt_conf(struc
rc = -EFAULT;
break;
}
+ case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_ccm_128, info);
+
+ if (len != sizeof(*aes_ccm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(aes_ccm_128->iv,
+ cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_chacha20_poly1305,
+ info);
+
+ if (len != sizeof(*chacha20_poly1305)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(chacha20_poly1305->iv,
+ cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, chacha20_poly1305,
+ sizeof(*chacha20_poly1305)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_gcm, info);
+
+ if (len != sizeof(*sm4_gcm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_gcm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ TLS_CIPHER_SM4_GCM_IV_SIZE);
+ memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_ccm, info);
+
+ if (len != sizeof(*sm4_ccm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_ccm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ TLS_CIPHER_SM4_CCM_IV_SIZE);
+ memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ rc = -EFAULT;
+ break;
+ }
default:
rc = -EINVAL;
}
@@@ -524,6 -606,12 +606,12 @@@ static int do_tls_setsockopt_conf(struc
case TLS_CIPHER_CHACHA20_POLY1305:
optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
break;
+ case TLS_CIPHER_SM4_GCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+ break;
default:
rc = -EINVAL;
goto err_crypto_info;
@@@ -681,12 -769,12 +769,12 @@@ static void build_protos(struct proto p
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE
diff --combined net/tls/tls_sw.c
index d5d09bd817b7,4147bb2e7057..c5ba3fd50e91
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -498,9 -498,15 +498,15 @@@ static int tls_do_encryption(struct soc
int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
}
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
@@@ -1457,10 -1463,16 +1463,16 @@@ static int decrypt_internal(struct soc
aad = (u8 *)(sgout + n_sgout);
iv = aad + prot->aad_size;
- /* For CCM based ciphers, first byte of nonce+iv is always '2' */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
- iv[0] = 2;
+ /* For CCM based ciphers, first byte of nonce+iv is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
}
/* Prepare IV */
@@@ -2026,7 -2038,7 +2038,7 @@@ splice_read_end
return copied ? : err;
}
-bool tls_sw_stream_read(const struct sock *sk)
+bool tls_sw_sock_is_readable(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -2424,6 -2436,40 +2436,40 @@@ int tls_set_sw_offload(struct sock *sk
cipher_name = "rfc7539(chacha20,poly1305)";
break;
}
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+ sm4_gcm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ iv = sm4_gcm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+ rec_seq = sm4_gcm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+ key = sm4_gcm_info->key;
+ salt = sm4_gcm_info->salt;
+ salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+ cipher_name = "gcm(sm4)";
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+ sm4_ccm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ iv = sm4_ccm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+ rec_seq = sm4_ccm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+ key = sm4_ccm_info->key;
+ salt = sm4_ccm_info->salt;
+ salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+ cipher_name = "ccm(sm4)";
+ break;
+ }
default:
rc = -EINVAL;
goto free_priv;
diff --combined net/wireless/core.c
index aaba847d79eb,45be124a98f1..eb297e1015e0
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@@ -524,7 -524,6 +524,7 @@@ use_default_name
INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
INIT_WORK(&rdev->mgmt_registrations_update_wk,
cfg80211_mgmt_registrations_update_wk);
+ spin_lock_init(&rdev->mgmt_registrations_lock);
#ifdef CONFIG_CFG80211_DEFAULT_PS
rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@@ -1081,6 -1080,16 +1081,16 @@@ void cfg80211_dev_free(struct cfg80211_
list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&rdev->wiphy, &scan->pub);
mutex_destroy(&rdev->wiphy.mtx);
+
+ /*
+ * The 'regd' can only be non-NULL if we never finished
+ * initializing the wiphy and thus never went through the
+ * unregister path - e.g. in failure scenarios. Thus, it
+ * cannot have been visible to anyone if non-NULL, so we
+ * can just free it here.
+ */
+ kfree(rcu_dereference_raw(rdev->wiphy.regd));
+
kfree(rdev);
}
@@@ -1280,6 -1289,7 +1290,6 @@@ void cfg80211_init_wdev(struct wireless
INIT_LIST_HEAD(&wdev->event_list);
spin_lock_init(&wdev->event_lock);
INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
INIT_LIST_HEAD(&wdev->pmsr_list);
spin_lock_init(&wdev->pmsr_lock);
INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
diff --combined net/wireless/scan.c
index adc0d14cfd86,e4f79b23f7f6..22e92be61938
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@@ -383,7 -383,7 +383,7 @@@ static bool is_bss(struct cfg80211_bss
const u8 *ssid, size_t ssid_len)
{
const struct cfg80211_bss_ies *ies;
- const u8 *ssidie;
+ const struct element *ssid_elem;
if (bssid && !ether_addr_equal(a->bssid, bssid))
return false;
@@@ -394,12 -394,12 +394,12 @@@
ies = rcu_access_pointer(a->ies);
if (!ies)
return false;
- ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
- if (!ssidie)
+ ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
+ if (!ssid_elem)
return false;
- if (ssidie[1] != ssid_len)
+ if (ssid_elem->datalen != ssid_len)
return false;
- return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+ return memcmp(ssid_elem->data, ssid, ssid_len) == 0;
}
static int
@@@ -418,17 -418,14 +418,17 @@@ cfg80211_add_nontrans_list(struct cfg80
}
ssid_len = ssid[1];
ssid = ssid + 2;
- rcu_read_unlock();
/* check if nontrans_bss is in the list */
list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
- if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+ if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+ rcu_read_unlock();
return 0;
+ }
}
+ rcu_read_unlock();
+
/* add to the list */
list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
return 0;
@@@ -1794,25 -1791,13 +1794,13 @@@ cfg80211_bss_update(struct cfg80211_reg
return NULL;
}
- /*
- * Update RX channel information based on the available frame payload
- * information. This is mainly for the 2.4 GHz band where frames can be received
- * from neighboring channels and the Beacon frames use the DSSS Parameter Set
- * element to indicate the current (transmitting) channel, but this might also
- * be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
- */
- static struct ieee80211_channel *
- cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
- struct ieee80211_channel *channel,
- enum nl80211_bss_scan_width scan_width)
+ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band)
{
const u8 *tmp;
- u32 freq;
int channel_number = -1;
- struct ieee80211_channel *alt_channel;
- if (channel->band == NL80211_BAND_S1GHZ) {
+ if (band == NL80211_BAND_S1GHZ) {
tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
@@@ -1833,6 -1818,29 +1821,29 @@@
}
}
+ return channel_number;
+ }
+ EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
+
+ /*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
+ static struct ieee80211_channel *
+ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width)
+ {
+ u32 freq;
+ int channel_number;
+ struct ieee80211_channel *alt_channel;
+
+ channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+
if (channel_number < 0) {
/* No channel information in frame payload */
return channel;
@@@ -2075,12 -2083,12 +2086,12 @@@ static void cfg80211_parse_mbssid_data(
if (!non_tx_data)
return;
- if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return;
if (!wiphy->support_mbssid)
return;
if (wiphy->support_only_he_mbssid &&
- !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
return;
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
@@@ -2447,10 -2455,10 +2458,10 @@@ cfg80211_inform_bss_frame_data(struct w
res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
len, gfp);
if (!res || !wiphy->support_mbssid ||
- !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return res;
if (wiphy->support_only_he_mbssid &&
- !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
return res;
non_tx_data.tx_bss = res;
diff --combined net/wireless/util.c
index a1a99a574984,2991f711491a..5ff1f8726faf
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -80,6 -80,7 +80,7 @@@ u32 ieee80211_channel_to_freq_khz(int c
return 0; /* not supported */
switch (band) {
case NL80211_BAND_2GHZ:
+ case NL80211_BAND_LC:
if (chan == 14)
return MHZ_TO_KHZ(2484);
else if (chan < 14)
@@@ -209,6 -210,7 +210,7 @@@ static void set_mandatory_flags_band(st
WARN_ON(want);
break;
case NL80211_BAND_2GHZ:
+ case NL80211_BAND_LC:
want = 7;
for (i = 0; i < sband->n_bitrates; i++) {
switch (sband->bitrates[i].bitrate) {
@@@ -1028,14 -1030,14 +1030,14 @@@ int cfg80211_change_iface(struct cfg802
!(rdev->wiphy.interface_modes & (1 << ntype)))
return -EOPNOTSUPP;
- /* if it's part of a bridge, reject changing type to station/ibss */
- if (netif_is_bridge_port(dev) &&
- (ntype == NL80211_IFTYPE_ADHOC ||
- ntype == NL80211_IFTYPE_STATION ||
- ntype == NL80211_IFTYPE_P2P_CLIENT))
- return -EBUSY;
-
if (ntype != otype) {
+ /* if it's part of a bridge, reject changing type to station/ibss */
+ if (netif_is_bridge_port(dev) &&
+ (ntype == NL80211_IFTYPE_ADHOC ||
+ ntype == NL80211_IFTYPE_STATION ||
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
+ return -EBUSY;
+
dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
wdev_lock(dev->ieee80211_ptr);
--
LinuxNextTracking
Build check errors found: 2021-10-28
by postmaster@open-mesh.org
Name of failed tests
====================
master
------
* difference between net-next and batadv master
Output of different failed tests
================================
master: difference between net-next and batadv master
-----------------------------------------------------
netnext/net/batman-adv/bridge_loop_avoidance.c | 8 --
netnext/net/batman-adv/main.c | 56 ++++------------
netnext/net/batman-adv/network-coding.c | 4 -
netnext/net/batman-adv/translation-table.c | 4 -
4 files changed, 20 insertions(+), 52 deletions(-)
Statistics
==========
master
------
Failed tests: 1
Started build tests: 293
Tested Linux versions: 41
Tested configs: 114
maint
-----
Failed tests: 0
Started build tests: 316
Tested Linux versions: 41
Tested configs: 120
[linux-next] LinuxNextTracking branch, master, updated. next-20211027
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 6f68cd634856f8ca93bafd623ba5357e0f648c68
Author: Pavel Skripkin <paskripkin(a)gmail.com>
Date: Sun Oct 24 16:13:56 2021 +0300
net: batman-adv: fix error handling
Syzbot reported an ODEBUG warning in batadv_nc_mesh_free(). The problem was
incorrect error handling in batadv_mesh_init().
Before this patch, batadv_mesh_init() called batadv_mesh_free() when any of
the batadv_*_init() calls failed. This approach may work well when there is
some kind of indicator that tells which parts of batadv are initialized, but
there isn't any.
All of the above leads to cleaning up uninitialized fields. Even if we hide
the ODEBUG warning by initializing bat_priv->nc.work, syzbot was able to hit a
GPF in batadv_nc_purge_paths(), because the hash pointer is still NULL. [1]
To fix these bugs we can unwind the batadv_*_init() calls one by one. This is
a good approach for two reasons: 1) it fixes the bugs on the error handling
path; 2) it improves performance, since we won't call unneeded
batadv_*_free() functions.
So, this patch makes each batadv_*_init() clean up all allocated memory
before returning with an error, so there is no need to call the corresponding
batadv_*_free(), and open-codes batadv_mesh_free() in the proper order to
avoid touching uninitialized fields (a sketch of this unwind idiom follows
after the tags below).
Link: https://lore.kernel.org/netdev/000000000000c87fbd05cef6bcb0@google.com/ [1]
Reported-and-tested-by: syzbot+28b0702ada0bf7381f58(a)syzkaller.appspotmail.com
Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol")
Signed-off-by: Pavel Skripkin <paskripkin(a)gmail.com>
Acked-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
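For readers less familiar with the idiom, below is a minimal, self-contained
sketch of the goto-unwind error handling that the patch adopts. All names
(ctx, foo/bar/baz) are hypothetical stand-ins chosen for illustration, not
batman-adv symbols; the real code unwinds batadv_originator_init(),
batadv_tt_init() and friends in exactly this fashion.

#include <errno.h>
#include <stdlib.h>

struct ctx {
	void *foo_state;
	void *bar_state;
	void *baz_state;
};

/* Hypothetical stage initializers. Each frees whatever it allocated
 * itself before returning an error, mirroring what the patch makes
 * every batadv_*_init() do. */
static int foo_init(struct ctx *c) { c->foo_state = malloc(16); return c->foo_state ? 0 : -ENOMEM; }
static int bar_init(struct ctx *c) { c->bar_state = malloc(16); return c->bar_state ? 0 : -ENOMEM; }
static int baz_init(struct ctx *c) { c->baz_state = malloc(16); return c->baz_state ? 0 : -ENOMEM; }
static void foo_free(struct ctx *c) { free(c->foo_state); c->foo_state = NULL; }
static void bar_free(struct ctx *c) { free(c->bar_state); c->bar_state = NULL; }

static int mesh_like_init(struct ctx *c)
{
	int ret;

	ret = foo_init(c);
	if (ret < 0)
		return ret;	/* nothing initialized yet, nothing to unwind */

	ret = bar_init(c);
	if (ret < 0)
		goto err_bar;	/* only foo needs unwinding */

	ret = baz_init(c);
	if (ret < 0)
		goto err_baz;	/* foo and bar need unwinding */

	return 0;

	/* Unwind in reverse order of initialization; each label names
	 * the stage that failed, so each entry point frees only what
	 * was already set up. */
err_baz:
	bar_free(c);
err_bar:
	foo_free(c);
	return ret;
}

The benefit over a single catch-all free routine is exactly the one the
commit message describes: no error path ever touches a field that was never
initialized.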
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 1669744304c5..17687848daec 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1560,10 +1560,14 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128);
- bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.claim_hash)
+ return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.backbone_hash) {
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
+ }
batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 3ddd66e4c29e..5207cd8d6ad8 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -190,29 +190,41 @@ int batadv_mesh_init(struct net_device *soft_iface)
bat_priv->gw.generation = 0;
- ret = batadv_v_mesh_init(bat_priv);
- if (ret < 0)
- goto err;
-
ret = batadv_originator_init(bat_priv);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_orig;
+ }
ret = batadv_tt_init(bat_priv);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_tt;
+ }
+
+ ret = batadv_v_mesh_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_v;
+ }
ret = batadv_bla_init(bat_priv);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_bla;
+ }
ret = batadv_dat_init(bat_priv);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_dat;
+ }
ret = batadv_nc_mesh_init(bat_priv);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_nc;
+ }
batadv_gw_init(bat_priv);
batadv_mcast_init(bat_priv);
@@ -222,8 +234,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
return 0;
-err:
- batadv_mesh_free(soft_iface);
+err_nc:
+ batadv_dat_free(bat_priv);
+err_dat:
+ batadv_bla_free(bat_priv);
+err_bla:
+ batadv_v_mesh_free(bat_priv);
+err_v:
+ batadv_tt_free(bat_priv);
+err_tt:
+ batadv_originator_free(bat_priv);
+err_orig:
+ batadv_purge_outstanding_packets(bat_priv, NULL);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
+
return ret;
}
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 9f06132e007d..0a7f1d36a6a8 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -152,8 +152,10 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
&batadv_nc_coding_hash_lock_class_key);
bat_priv->nc.decoding_hash = batadv_hash_new(128);
- if (!bat_priv->nc.decoding_hash)
+ if (!bat_priv->nc.decoding_hash) {
+ batadv_hash_destroy(bat_priv->nc.coding_hash);
goto err;
+ }
batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
&batadv_nc_decoding_hash_lock_class_key);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index e0b3dace2020..4b7ad6684bc4 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -4162,8 +4162,10 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
return ret;
ret = batadv_tt_global_init(bat_priv);
- if (ret < 0)
+ if (ret < 0) {
+ batadv_tt_local_table_free(bat_priv);
return ret;
+ }
batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
batadv_tt_tvlv_unicast_handler_v1,
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20211027
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 735b1fe548e7396a639f641b65c3a89b5d377cbf
Merge: efabfec55eb24459a7a42f5fd05eb9a690d08700 06338ceff92510544a732380dbb2d621bd3775bf
Author: Stephen Rothwell <sfr(a)canb.auug.org.au>
Date: Wed Oct 27 11:11:52 2021 +1100
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
# Conflicts:
# include/linux/mlx5/fs.h
# include/net/sock.h
diff --combined Documentation/devicetree/bindings/vendor-prefixes.yaml
index fe9615089acb,e2eb0c738f3a..a348fccf3d0a
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@@ -131,6 -131,8 +131,8 @@@ patternProperties
description: Asahi Kasei Corp.
"^asc,.*":
description: All Sensors Corporation
+ "^asix,.*":
+ description: ASIX Electronics Corporation
"^aspeed,.*":
description: ASPEED Technology Inc.
"^asus,.*":
@@@ -191,8 -193,6 +193,8 @@@
description: B&R Industrial Automation GmbH
"^bticino,.*":
description: Bticino International
+ "^calamp,.*":
+ description: CalAmp Corp.
"^calaosystems,.*":
description: CALAO Systems SAS
"^calxeda,.*":
@@@ -337,8 -337,6 +339,8 @@@
description: EBV Elektronik
"^eckelmann,.*":
description: Eckelmann AG
+ "^edimax,.*":
+ description: EDIMAX Technology Co., Ltd
"^edt,.*":
description: Emerging Display Technologies
"^eeti,.*":
@@@ -357,8 -355,6 +359,8 @@@
description: Shenzhen Elida Technology Co., Ltd.
"^elimo,.*":
description: Elimo Engineering Ltd.
+ "^elpida,.*":
+ description: Elpida Memory, Inc.
"^embest,.*":
description: Shenzhen Embest Technology Co., Ltd.
"^emlid,.*":
@@@ -401,8 -397,6 +403,8 @@@
description: Exar Corporation
"^excito,.*":
description: Excito
+ "^exegin,.*":
+ description: Exegin Technologies Limited
"^ezchip,.*":
description: EZchip Semiconductor
"^facebook,.*":
@@@ -517,8 -511,6 +519,8 @@@
description: Hycon Technology Corp.
"^hydis,.*":
description: Hydis Technologies
+ "^hynix,.*":
+ description: SK Hynix Inc.
"^hyundai,.*":
description: Hyundai Technology
"^i2se,.*":
@@@ -587,8 -579,6 +589,8 @@@
description: JEDEC Solid State Technology Association
"^jesurun,.*":
description: Shenzhen Jesurun Electronics Business Dept.
+ "^jethome,.*":
+ description: JetHome (IP Sokolov P.A.)
"^jianda,.*":
description: Jiandangjing Technology Co., Ltd.
"^kam,.*":
@@@ -1114,8 -1104,6 +1116,8 @@@
description: SparkFun Electronics
"^sprd,.*":
description: Spreadtrum Communications Inc.
+ "^ssi,.*":
+ description: SSI Computer Corp
"^sst,.*":
description: Silicon Storage Technology, Inc.
"^sstar,.*":
diff --combined MAINTAINERS
index d2d4b2c33a06,975086c5345d..bcf838a430f4
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -334,7 -334,7 +334,7 @@@ F: drivers/platform/x86/acer-wmi.
ACPI
M: "Rafael J. Wysocki" <rafael(a)kernel.org>
-M: Len Brown <lenb(a)kernel.org>
+R: Len Brown <lenb(a)kernel.org>
L: linux-acpi(a)vger.kernel.org
S: Supported
W: https://01.org/linux-acpi
@@@ -355,7 -355,7 +355,7 @@@ F: tools/power/acpi
ACPI APEI
M: "Rafael J. Wysocki" <rafael(a)kernel.org>
-M: Len Brown <lenb(a)kernel.org>
+R: Len Brown <lenb(a)kernel.org>
R: James Morse <james.morse(a)arm.com>
R: Tony Luck <tony.luck(a)intel.com>
R: Borislav Petkov <bp(a)alien8.de>
@@@ -378,6 -378,14 +378,6 @@@ F: drivers/acpi/acpica
F: include/acpi/
F: tools/power/acpi/
-ACPI FAN DRIVER
-M: Zhang Rui <rui.zhang(a)intel.com>
-L: linux-acpi(a)vger.kernel.org
-S: Supported
-W: https://01.org/linux-acpi
-B: https://bugzilla.kernel.org
-F: drivers/acpi/fan.c
-
ACPI FOR ARM64 (ACPI/arm64)
M: Lorenzo Pieralisi <lorenzo.pieralisi(a)arm.com>
M: Hanjun Guo <guohanjun(a)huawei.com>
@@@ -414,6 -422,14 +414,6 @@@ W: https://01.org/linux-acp
B: https://bugzilla.kernel.org
F: drivers/acpi/*thermal*
-ACPI VIDEO DRIVER
-M: Zhang Rui <rui.zhang(a)intel.com>
-L: linux-acpi(a)vger.kernel.org
-S: Supported
-W: https://01.org/linux-acpi
-B: https://bugzilla.kernel.org
-F: drivers/acpi/acpi_video.c
-
ACPI VIOT DRIVER
M: Jean-Philippe Brucker <jean-philippe(a)linaro.org>
L: linux-acpi(a)vger.kernel.org
@@@ -1266,13 -1282,6 +1266,13 @@@ S: Maintaine
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
F: drivers/iommu/apple-dart.c
+APPLE PCIE CONTROLLER DRIVER
+M: Alyssa Rosenzweig <alyssa(a)rosenzweig.io>
+M: Marc Zyngier <maz(a)kernel.org>
+L: linux-pci(a)vger.kernel.org
+S: Maintained
+F: drivers/pci/controller/pcie-apple.c
+
APPLE SMC DRIVER
M: Henrik Rydberg <rydberg(a)bitmath.org>
L: linux-hwmon(a)vger.kernel.org
@@@ -2303,14 -2312,6 +2303,14 @@@ F: arch/arm/boot/dts/nuvoton-wpcm450
F: arch/arm/mach-npcm/wpcm450.c
F: drivers/*/*wpcm*
+ARM/NXP S32G ARCHITECTURE
+M: Chester Lin <clin(a)suse.com>
+R: Andreas Färber <afaerber(a)suse.de>
+R: Matthias Brugger <mbrugger(a)suse.com>
+L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm64/boot/dts/freescale/s32g*.dts*
+
ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
L: openmoko-kernel(a)lists.openmoko.org (subscribers-only)
S: Orphan
@@@ -2898,6 -2899,12 +2898,12 @@@ S: Maintaine
F: Documentation/hwmon/asc7621.rst
F: drivers/hwmon/asc7621.c
+ ASIX AX88796C SPI ETHERNET ADAPTER
+ M: Łukasz Stelmach <l.stelmach(a)samsung.com>
+ S: Maintained
+ F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml
+ F: drivers/net/ethernet/asix/ax88796c_*
+
ASPEED PINCTRL DRIVERS
M: Andrew Jeffery <andrew(a)aj.id.au>
L: linux-aspeed(a)lists.ozlabs.org (moderated for non-subscribers)
@@@ -4435,7 -4442,7 +4441,7 @@@ N: cros_e
N: cros-ec
CHRONTEL CH7322 CEC DRIVER
-M: Jeff Chase <jnchase(a)google.com>
+M: Joe Tessler <jrt(a)google.com>
L: linux-media(a)vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@@ -5457,19 -5464,6 +5463,19 @@@ F: include/net/devlink.
F: include/uapi/linux/devlink.h
F: net/core/devlink.c
+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+M: Christoph Niedermaier <cniedermaier(a)dh-electronics.com>
+L: kernel(a)dh-electronics.com
+S: Maintained
+F: arch/arm/boot/dts/imx6*-dhcom-*
+
+DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
+M: Marek Vasut <marex(a)denx.de>
+L: kernel(a)dh-electronics.com
+S: Maintained
+F: arch/arm/boot/dts/stm32mp1*-dhcom-*
+F: arch/arm/boot/dts/stm32mp1*-dhcor-*
+
DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource(a)diasemi.com>
S: Supported
@@@ -6304,7 -6298,6 +6310,7 @@@ L: linux-tegra(a)vger.kernel.or
S: Supported
T: git git://anongit.freedesktop.org/tegra/linux.git
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+F: Documentation/devicetree/bindings/gpu/host1x/
F: drivers/gpu/drm/tegra/
F: drivers/gpu/host1x/
F: include/linux/host1x.h
@@@ -7025,7 -7018,6 +7031,6 @@@ F: drivers/net/mdio/fwnode_mdio.
F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
- F: drivers/of/of_net.c
F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
F: include/linux/mdio/*.h
@@@ -7037,6 -7029,7 +7042,7 @@@ F: include/linux/platform_data/mdio-gpi
F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
+ F: net/core/of_net.c
EXFAT FILE SYSTEM
M: Namjae Jeon <linkinjeon(a)kernel.org>
@@@ -8211,7 -8204,7 +8217,7 @@@ T: git git://linuxtv.org/anttip/media_t
F: drivers/media/usb/hackrf/
HANTRO VPU CODEC DRIVER
-M: Ezequiel Garcia <ezequiel(a)collabora.com>
+M: Ezequiel Garcia <ezequiel(a)vanguardiasur.com.ar>
M: Philipp Zabel <p.zabel(a)pengutronix.de>
L: linux-media(a)vger.kernel.org
L: linux-rockchip(a)lists.infradead.org
@@@ -8679,12 -8672,6 +8685,12 @@@ S: Maintaine
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/hi556.c
+HYNIX HI846 SENSOR DRIVER
+M: Martin Kepplinger <martin.kepplinger(a)puri.sm>
+L: linux-media(a)vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/hi846.c
+
Hyper-V/Azure CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys(a)microsoft.com>
M: Haiyang Zhang <haiyangz(a)microsoft.com>
@@@ -10020,7 -10007,6 +10026,7 @@@ JC42.4 TEMPERATURE SENSOR DRIVE
M: Guenter Roeck <linux(a)roeck-us.net>
L: linux-hwmon(a)vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/hwmon/jedec,jc42.yaml
F: Documentation/hwmon/jc42.rst
F: drivers/hwmon/jc42.c
@@@ -10060,7 -10046,6 +10066,7 @@@ F: include/linux/jbd2.
JPU V4L2 MEM2MEM DRIVER FOR RENESAS
M: Mikhail Ulyanov <mikhail.ulyanov(a)cogentembedded.com>
L: linux-media(a)vger.kernel.org
+L: linux-renesas-soc(a)vger.kernel.org
S: Maintained
F: drivers/media/platform/rcar_jpu.c
@@@ -10915,7 -10900,7 +10921,7 @@@ LM90 HARDWARE MONITOR DRIVE
M: Jean Delvare <jdelvare(a)suse.com>
L: linux-hwmon(a)vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/hwmon/lm90.txt
+F: Documentation/devicetree/bindings/hwmon/national,lm90.yaml
F: Documentation/hwmon/lm90.rst
F: drivers/hwmon/lm90.c
F: include/dt-bindings/thermal/lm90.h
@@@ -11299,6 -11284,7 +11305,6 @@@ F: Documentation/networking/device_driv
F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER
-M: Vadym Kochan <vkochan(a)marvell.com>
M: Taras Chornyi <tchornyi(a)marvell.com>
S: Supported
W: https://github.com/Marvell-switching/switchdev-prestera
@@@ -11699,7 -11685,6 +11705,7 @@@ T: git git://linuxtv.org/media_tree.gi
F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
F: Documentation/devicetree/bindings/media/renesas,isp.yaml
F: Documentation/devicetree/bindings/media/renesas,vin.yaml
+F: drivers/media/platform/rcar-isp.c
F: drivers/media/platform/rcar-vin/
MEDIA DRIVERS FOR RENESAS - VSP1
@@@ -11845,7 -11830,9 +11851,9 @@@ F: drivers/mmc/host/mtk-sd.
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd(a)nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83(a)gmail.com>
- R: Ryder Lee <ryder.lee(a)mediatek.com>
+ M: Ryder Lee <ryder.lee(a)mediatek.com>
+ R: Shayne Chen <shayne.chen(a)mediatek.com>
+ R: Sean Wang <sean.wang(a)mediatek.com>
L: linux-wireless(a)vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
@@@ -11869,12 -11856,6 +11877,12 @@@ S: Maintaine
F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
F: drivers/i2c/busses/i2c-mt7621.c
+MEDIATEK MT7621 PCIE CONTROLLER DRIVER
+M: Sergio Paracuellos <sergio.paracuellos(a)gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
+F: drivers/pci/controller/pcie-mt7621.c
+
MEDIATEK MT7621 PHY PCI DRIVER
M: Sergio Paracuellos <sergio.paracuellos(a)gmail.com>
S: Maintained
@@@ -11898,14 -11879,6 +11906,14 @@@ M: Sean Wang <sean.wang(a)mediatek.com
S: Maintained
F: drivers/char/hw_random/mtk-rng.c
+MEDIATEK SMI DRIVER
+M: Yong Wu <yong.wu(a)mediatek.com>
+L: linux-mediatek(a)lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/memory-controllers/mediatek,smi*
+F: drivers/memory/mtk-smi.c
+F: include/soc/mediatek/smi.h
+
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang(a)mediatek.com>
M: Landen Chao <Landen.Chao(a)mediatek.com>
@@@ -12789,7 -12762,6 +12797,7 @@@ M: Laurent Pinchart <laurent.pinchart@i
L: linux-media(a)vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
+F: Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml
F: drivers/media/i2c/mt9p031.c
F: include/media/i2c/mt9p031.h
@@@ -13084,6 -13056,7 +13092,7 @@@ F: include/linux/dsa
F: include/linux/platform_data/dsa.h
F: include/net/dsa.h
F: net/dsa/
+ F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem(a)davemloft.net>
@@@ -13842,13 -13815,6 +13851,13 @@@ S: Maintaine
T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/ov13858.c
+OMNIVISION OV13B10 SENSOR DRIVER
+M: Arec Kao <arec.kao(a)intel.com>
+L: linux-media(a)vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/i2c/ov13b10.c
+
OMNIVISION OV2680 SENSOR DRIVER
M: Rui Miguel Silva <rmfrfs(a)gmail.com>
L: linux-media(a)vger.kernel.org
@@@ -14645,15 -14611,7 +14654,15 @@@ M: Stanimir Varbanov <svarbanov@mm-sol.
L: linux-pci(a)vger.kernel.org
L: linux-arm-msm(a)vger.kernel.org
S: Maintained
-F: drivers/pci/controller/dwc/*qcom*
+F: drivers/pci/controller/dwc/pcie-qcom.c
+
+PCIE ENDPOINT DRIVER FOR QUALCOMM
+M: Manivannan Sadhasivam <manivannan.sadhasivam(a)linaro.org>
+L: linux-pci(a)vger.kernel.org
+L: linux-arm-msm(a)vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCIE DRIVER FOR ROCKCHIP
M: Shawn Lin <shawn.lin(a)rock-chips.com>
@@@ -14910,6 -14868,13 +14919,6 @@@ L: linux-omap(a)vger.kernel.or
S: Maintained
F: drivers/pinctrl/pinctrl-single.c
-PIN CONTROLLER - ST SPEAR
-M: Viresh Kumar <vireshk(a)kernel.org>
-L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-W: http://www.st.com/spear
-F: drivers/pinctrl/spear/
-
PKTCDVD DRIVER
M: linux-block(a)vger.kernel.org
S: Orphan
@@@ -14958,6 -14923,7 +14967,6 @@@ S: Maintaine
W: http://hwmon.wiki.kernel.org/
W: http://www.roeck-us.net/linux/drivers/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
-F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
F: Documentation/devicetree/bindings/hwmon/ltc2978.txt
F: Documentation/devicetree/bindings/hwmon/max31785.txt
F: Documentation/hwmon/adm1275.rst
@@@ -15506,6 -15472,7 +15515,7 @@@ M: ath9k-devel(a)qca.qualcomm.co
L: linux-wireless(a)vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
+ F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER
@@@ -15927,6 -15894,12 +15937,12 @@@ L: linux-wireless(a)vger.kernel.or
S: Maintained
F: drivers/net/wireless/realtek/rtw88/
+ REALTEK WIRELESS DRIVER (rtw89)
+ M: Ping-Ke Shih <pkshih(a)realtek.com>
+ L: linux-wireless(a)vger.kernel.org
+ S: Maintained
+ F: drivers/net/wireless/realtek/rtw89/
+
REDPINE WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar(a)gmail.com>
M: Siva Rebbagondla <siva8118(a)gmail.com>
@@@ -16161,7 -16134,7 +16177,7 @@@ F: include/uapi/linux/rkisp1-config.
ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
M: Jacob Chen <jacob-chen(a)iotwrt.com>
-M: Ezequiel Garcia <ezequiel(a)collabora.com>
+M: Ezequiel Garcia <ezequiel(a)vanguardiasur.com.ar>
L: linux-media(a)vger.kernel.org
L: linux-rockchip(a)lists.infradead.org
S: Maintained
@@@ -16169,7 -16142,7 +16185,7 @@@ F: Documentation/devicetree/bindings/me
F: drivers/media/platform/rockchip/rga/
ROCKCHIP VIDEO DECODER DRIVER
-M: Ezequiel Garcia <ezequiel(a)collabora.com>
+M: Ezequiel Garcia <ezequiel(a)vanguardiasur.com.ar>
L: linux-media(a)vger.kernel.org
L: linux-rockchip(a)lists.infradead.org
S: Maintained
@@@ -17674,17 -17647,21 +17690,17 @@@ W: https://github.com/linux-speakup/spe
B: https://github.com/linux-speakup/speakup/issues
F: drivers/accessibility/speakup/
-SPEAR CLOCK FRAMEWORK SUPPORT
-M: Viresh Kumar <vireshk(a)kernel.org>
-L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-W: http://www.st.com/spear
-F: drivers/clk/spear/
-
-SPEAR PLATFORM SUPPORT
+SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT
M: Viresh Kumar <vireshk(a)kernel.org>
M: Shiraz Hashim <shiraz.linux.kernel(a)gmail.com>
+M: soc(a)kernel.org
L: linux-arm-kernel(a)lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://www.st.com/spear
F: arch/arm/boot/dts/spear*
F: arch/arm/mach-spear/
+F: drivers/clk/spear/
+F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM
M: Tudor Ambarus <tudor.ambarus(a)microchip.com>
@@@ -18601,9 -18578,7 +18617,9 @@@ L: linux-pm(a)vger.kernel.or
S: Supported
Q: https://patchwork.kernel.org/project/linux-pm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
+F: Documentation/ABI/testing/sysfs-class-thermal
F: Documentation/devicetree/bindings/thermal/
+F: Documentation/driver-api/thermal/
F: drivers/thermal/
F: include/linux/cpu_cooling.h
F: include/linux/thermal.h
@@@ -20377,7 -20352,6 +20393,7 @@@ X86 ARCHITECTURE (32-BIT AND 64-BIT
M: Thomas Gleixner <tglx(a)linutronix.de>
M: Ingo Molnar <mingo(a)redhat.com>
M: Borislav Petkov <bp(a)alien8.de>
+M: Dave Hansen <dave.hansen(a)linux.intel.com>
M: x86(a)kernel.org
R: "H. Peter Anvin" <hpa(a)zytor.com>
L: linux-kernel(a)vger.kernel.org
diff --combined drivers/infiniband/hw/mlx4/main.c
index 38742e233915,f3fa2fe6a88a..ceca05982f61
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@@ -2105,10 -2105,10 +2105,10 @@@ mlx4_ib_alloc_hw_device_stats(struct ib
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name)
+ if (!diag[0].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@@ -2118,10 -2118,10 +2118,10 @@@ mlx4_ib_alloc_hw_port_stats(struct ib_d
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name)
+ if (!diag[1].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@@ -2151,8 -2151,10 +2151,8 @@@ static int mlx4_ib_get_hw_stats(struct
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
- const char ***name,
- u32 **offset,
- u32 *num,
- bool port)
+ struct rdma_stat_desc **pdescs,
+ u32 **offset, u32 *num, bool port)
{
u32 num_counters;
@@@ -2164,46 -2166,46 +2164,46 @@@
if (!port)
num_counters += ARRAY_SIZE(diag_device_only);
- *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
- if (!*name)
+ *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc),
+ GFP_KERNEL);
+ if (!*pdescs)
return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
if (!*offset)
- goto err_name;
+ goto err;
*num = num_counters;
return 0;
-err_name:
- kfree(*name);
+err:
+ kfree(*pdescs);
return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
- const char **name,
- u32 *offset,
- bool port)
+ struct rdma_stat_desc *descs,
+ u32 *offset, bool port)
{
int i;
int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
- name[i] = diag_basic[i].name;
+ descs[i].name = diag_basic[i].name;
offset[i] = diag_basic[i].offset;
}
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
- name[j] = diag_ext[i].name;
+ descs[j].name = diag_ext[i].name;
offset[j] = diag_ext[i].offset;
}
}
if (!port) {
for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
- name[j] = diag_device_only[i].name;
+ descs[j].name = diag_device_only[i].name;
offset[j] = diag_device_only[i].offset;
}
}
@@@ -2231,13 -2233,13 +2231,13 @@@ static int mlx4_ib_alloc_diag_counters(
if (i && !per_port)
continue;
- ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
&diag[i].offset,
&diag[i].num_counters, i);
if (ret)
goto err_alloc;
- mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
diag[i].offset, i);
}
@@@ -2247,7 -2249,7 +2247,7 @@@
err_alloc:
if (i) {
- kfree(diag[i - 1].name);
+ kfree(diag[i - 1].descs);
kfree(diag[i - 1].offset);
}
@@@ -2260,7 -2262,7 +2260,7 @@@ static void mlx4_ib_diag_cleanup(struc
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
kfree(ibdev->diag_counters[i].offset);
- kfree(ibdev->diag_counters[i].name);
+ kfree(ibdev->diag_counters[i].descs);
}
}
@@@ -2273,7 -2275,7 +2273,7 @@@ static void mlx4_ib_update_qps(struct m
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
diff --combined drivers/infiniband/hw/mlx4/qp.c
index 3a1a4ac9dd33,aea4182f33a4..b17d6ebc5b70
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@@ -1099,10 -1099,8 +1099,10 @@@ static int create_qp_common(struct ib_p
if (dev->steering_support ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
qp->flags |= MLX4_IB_QP_NETIF;
- else
+ else {
+ err = -EINVAL;
goto err;
+ }
}
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
@@@ -1855,7 -1853,7 +1855,7 @@@ static int mlx4_set_path(struct mlx4_ib
u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64(smac),
+ ether_addr_to_u64(smac),
vlan_id,
path, &mqp->pri, port);
}
diff --combined drivers/infiniband/hw/mlx5/odp.c
index 31596d8c5212,81147d774dd2..91eb615b89ee
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@@ -430,7 -430,7 +430,7 @@@ static struct mlx5_ib_mr *implicit_get_
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
mr->parent = imr;
odp->private = mr;
@@@ -500,7 -500,7 +500,7 @@@ struct mlx5_ib_mr *mlx5_ib_alloc_implic
}
imr->ibmr.pd = &pd->ibpd;
- imr->mmkey.iova = 0;
+ imr->ibmr.iova = 0;
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
imr->ibmr.rkey = imr->mmkey.key;
@@@ -738,7 -738,7 +738,7 @@@ static int pagefault_mr(struct mlx5_ib_
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova))
+ if (unlikely(io_virt < mr->ibmr.iova))
return -EFAULT;
if (mr->umem->is_dmabuf)
@@@ -747,7 -747,7 +747,7 @@@
if (!odp->is_implicit_odp) {
u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova,
+ if (check_add_overflow(io_virt - mr->ibmr.iova,
(u64)odp->umem.address, &user_va))
return -EFAULT;
if (unlikely(user_va >= ib_umem_end(odp) ||
@@@ -788,7 -788,7 +788,7 @@@ struct pf_frame
int depth;
};
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
@@@ -797,6 -797,21 +797,6 @@@
return mmkey->key == key;
}
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
-{
- struct mlx5_ib_mw *mw;
- struct mlx5_ib_devx_mr *devx_mr;
-
- if (mmkey->type == MLX5_MKEY_MW) {
- mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
- return mw->ndescs;
- }
-
- devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
- mmkey);
- return devx_mr->ndescs;
-}
-
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@@ -816,11 -831,12 +816,11 @@@ static int pagefault_single_data_segmen
{
int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
- struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
struct mlx5_klm *pklm;
u32 *out = NULL;
size_t offset;
- int ndescs;
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
@@@ -869,6 -885,8 +869,6 @@@ next_mr
case MLX5_MKEY_MW:
case MLX5_MKEY_INDIRECT_DEVX:
- ndescs = get_indirect_num_descs(mmkey);
-
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
@@@ -876,7 -894,7 +876,7 @@@
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
- sizeof(*pklm) * (ndescs - 2);
+ sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) {
kfree(out);
@@@ -891,14 -909,14 +891,14 @@@
pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
if (ret)
goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
if (offset >= be32_to_cpu(pklm->bcount)) {
offset -= be32_to_cpu(pklm->bcount);
continue;
@@@ -1541,6 -1559,7 +1541,7 @@@ int mlx5r_odp_create_eq(struct mlx5_ib_
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
@@@ -1685,31 -1704,25 +1686,31 @@@ get_prefetchable_mr(struct ib_pd *pd, e
u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
- if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
if (mr->ibmr.pd != pd) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
!mr->umem->writable) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
@@@ -1741,7 -1754,7 +1742,7 @@@ static void mlx5_ib_prefetch_mr_work(st
destroy_prefetch_work(work);
}
-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct prefetch_mr_work *work,
struct ib_sge *sg_list, u32 num_sge)
@@@ -1752,19 -1765,17 +1753,19 @@@
work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- work->frags[i].io_virt = sg_list[i].addr;
- work->frags[i].length = sg_list[i].length;
- work->frags[i].mr =
- get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!work->frags[i].mr) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
work->num_sge = i;
- return false;
+ return PTR_ERR(mr);
}
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
}
work->num_sge = num_sge;
- return true;
+ return 0;
}
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@@ -1780,8 -1791,8 +1781,8 @@@
struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr)
- return -ENOENT;
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
if (ret < 0) {
@@@ -1801,7 -1812,6 +1802,7 @@@ int mlx5_ib_advise_mr_prefetch(struct i
{
u32 pf_flags = 0;
struct prefetch_mr_work *work;
+ int rc;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@@ -1817,10 -1827,9 +1818,10 @@@
if (!work)
return -ENOMEM;
- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
destroy_prefetch_work(work);
- return -EINVAL;
+ return rc;
}
queue_work(system_unbound_wq, &work->work);
return 0;
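
The prefetch rework above changes get_prefetchable_mr() from returning NULL on any
failure to returning distinct error pointers, so callers can propagate -ENOENT,
-EINVAL or -EPERM instead of a blanket -EINVAL. A minimal caller-side sketch of
that kernel idiom (example_use() is hypothetical; the ERR_PTR helpers live in
<linux/err.h>):

#include <linux/err.h>

static int example_use(struct ib_pd *pd, u32 lkey)
{
	struct mlx5_ib_mr *mr;

	mr = get_prefetchable_mr(pd, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH, lkey);
	if (IS_ERR(mr))
		return PTR_ERR(mr);	/* -ENOENT, -EINVAL or -EPERM */

	/* ... use mr ... */
	return 0;
}
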
diff --combined drivers/net/ethernet/chelsio/cxgb3/common.h
index d115ec93296d,a309016f7f8c..ecd025dda8d6
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@@ -676,6 -676,8 +676,6 @@@ void t3_link_changed(struct adapter *ad
void t3_link_fault(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
-int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
-int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter);
@@@ -708,7 -710,7 +708,7 @@@ int t3_mac_enable(struct cmac *mac, in
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
- int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+ int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
diff --combined drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index e185f5f24849,9cf9e33664e4..bfffcaeee624
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@@ -2036,16 -2036,20 +2036,16 @@@ static int get_eeprom(struct net_devic
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- int i, err = 0;
-
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ int cnt;
e->magic = EEPROM_MAGIC;
- for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
- err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
+ cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
+ if (cnt < 0)
+ return cnt;
- if (!err)
- memcpy(data, buf + e->offset, e->len);
- kfree(buf);
- return err;
+ e->len = cnt;
+
+ return 0;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@@ -2054,6 -2058,7 +2054,6 @@@
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
u32 aligned_offset, aligned_len;
- __le32 *p;
u8 *buf;
int err;
@@@ -2067,9 -2072,12 +2067,9 @@@
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
- if (!err && aligned_len > 4)
- err = t3_seeprom_read(adapter,
- aligned_offset + aligned_len - 4,
- (__le32 *) & buf[aligned_len - 4]);
- if (err)
+ err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
+ buf);
+ if (err < 0)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
} else
@@@ -2079,13 -2087,17 +2079,13 @@@
if (err)
goto out;
- for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
- err = t3_seeprom_write(adapter, aligned_offset, *p);
- aligned_offset += 4;
- }
-
- if (!err)
+ err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
+ if (err >= 0)
err = t3_seeprom_wp(adapter, 1);
out:
if (buf != data)
kfree(buf);
- return err;
+ return err < 0 ? err : 0;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@@ -2574,7 -2586,7 +2574,7 @@@ static int cxgb_set_mac_addr(struct net
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
diff --combined drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index cb85c2f21525,53feac8da503..da41eee2f25c
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@@ -29,6 -29,7 +29,7 @@@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+ #include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@@ -595,9 -596,80 +596,9 @@@ struct t3_vpd
u32 pad; /* for multiple-of-4 sizing and alignment */
};
-#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00
-/**
- * t3_seeprom_read - read a VPD EEPROM location
- * @adapter: adapter to read
- * @addr: EEPROM address
- * @data: where to store the read data
- *
- * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
- * VPD ROM capability. A zero is written to the flag bit when the
- * address is written to the control register. The hardware device will
- * set the flag to 1 when 4 bytes have been read into the data register.
- */
-int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
-{
- u16 val;
- int attempts = EEPROM_MAX_POLL;
- u32 v;
- unsigned int base = adapter->params.pci.vpd_cap_addr;
-
- if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
- return -EINVAL;
-
- pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
- do {
- udelay(10);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
- } while (!(val & PCI_VPD_ADDR_F) && --attempts);
-
- if (!(val & PCI_VPD_ADDR_F)) {
- CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
- return -EIO;
- }
- pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
- *data = cpu_to_le32(v);
- return 0;
-}
-
-/**
- * t3_seeprom_write - write a VPD EEPROM location
- * @adapter: adapter to write
- * @addr: EEPROM address
- * @data: value to write
- *
- * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
- * VPD ROM capability.
- */
-int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
-{
- u16 val;
- int attempts = EEPROM_MAX_POLL;
- unsigned int base = adapter->params.pci.vpd_cap_addr;
-
- if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
- return -EINVAL;
-
- pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
- le32_to_cpu(data));
- pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
- addr | PCI_VPD_ADDR_F);
- do {
- msleep(1);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
- } while ((val & PCI_VPD_ADDR_F) && --attempts);
-
- if (val & PCI_VPD_ADDR_F) {
- CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
- return -EIO;
- }
- return 0;
-}
-
/**
* t3_seeprom_wp - enable/disable EEPROM write protection
* @adapter: the adapter
@@@ -607,14 -679,7 +608,14 @@@
*/
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
- return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+ u32 data = enable ? 0xc : 0;
+ int ret;
+
+ /* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */
+ ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
+ &data);
+
+ return ret < 0 ? ret : 0;
}
static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
@@@ -644,22 -709,24 +645,22 @@@ static int vpdstrtou16(char *s, u8 len
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, addr, ret;
struct t3_vpd vpd;
+ u8 base_val = 0;
+ int addr, ret;
/*
* Card information is normally at VPD_BASE but some early cards had
* it at 0.
*/
- ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
- if (ret)
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
+ if (ret < 0)
return ret;
- addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
- for (i = 0; i < sizeof(vpd); i += 4) {
- ret = t3_seeprom_read(adapter, addr + i,
- (__le32 *)((u8 *)&vpd + i));
- if (ret)
- return ret;
- }
+ ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
+ if (ret < 0)
+ return ret;
ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
if (ret)
@@@ -3692,8 -3759,7 +3693,7 @@@ int t3_prep_adapter(struct adapter *ada
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i], hw_addr);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
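
The cxgb3 conversion above replaces the hand-rolled t3_seeprom_read()/
t3_seeprom_write() pollers with the PCI core's VPD accessors. Those helpers return
the number of bytes transferred or a negative errno, which is why the callers now
test for ret < 0 and map positive counts to success. A hedged sketch of the read
pattern (example_read_vpd() and its short-read policy are illustrative, not
Chelsio code):

#include <linux/pci.h>

static int example_read_vpd(struct pci_dev *pdev, void *buf, size_t len)
{
	ssize_t cnt;

	cnt = pci_read_vpd(pdev, 0, len, buf);	/* offset 0 into VPD space */
	if (cnt < 0)
		return cnt;			/* negative errno */

	return cnt == len ? 0 : -EIO;		/* treat a short read as failure */
}
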
diff --combined drivers/net/ethernet/intel/ice/ice_ptp.c
index d1ef3d48a4b0,a1be0d04a2d0..bf7247c6f58e
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@@ -6,6 -6,252 +6,252 @@@
#define E810_OUT_PROP_DELAY_NS 1
+ static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+ };
+
+ /**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+ static int
+ ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+ {
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+ }
+
+ /**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration
+ * passed in the ptp_pins parameter
+ */
+ static int
+ ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+ {
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+ }
+
+ /**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+ static int
+ ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+ {
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+ }
+
+ /**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify that the pin supports the requested function, check pin consistency,
+ * and reconfigure the SMA logic attached to the given pin to enable its
+ * desired functionality.
+ */
+ static int
+ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+ {
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+ }
+
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@@ -735,17 -981,34 +981,34 @@@ ice_ptp_gpio_enable_e810(struct ptp_clo
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -1;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@@ -757,7 -1020,19 +1020,19 @@@
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@@ -1012,7 -1287,7 +1287,7 @@@ int ice_ptp_set_ts_config(struct ice_p
* The timestamp is in ns, so we must convert the result first.
*/
void
- ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@@ -1037,14 -1312,94 +1312,94 @@@
}
}
+ /**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable the OS access to the SMA pins. Called to clear out the OS
+ * indications of pin support when we fail to setup the E810-T SMA control
+ * register.
+ */
+ static void
+ ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+ }
+
+ /**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config and initializing it
+ * according to the current SMA status. On failure, disable all of the
+ * extended SMA pin support.
+ */
+ static void
+ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ }
+
+ /**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+ static void
+ ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
+
/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@@ -1062,7 -1417,10 +1417,10 @@@ ice_ptp_set_funcs_e810(struct ice_pf *p
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
@@@ -1571,9 -1929,6 +1929,9 @@@ err_kworker
*/
void ice_ptp_release(struct ice_pf *pf)
{
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
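
The E810-T pin code above plugs into the standard PTP pin model: when userspace
issues the PTP_PIN_SETFUNC ioctl, the core invokes the driver's .verify() callback
with the requested (pin, function, channel) tuple, and the driver accepts or
rejects it before any hardware is touched. A simplified sketch of that contract
(example_verify() is illustrative and much cruder than the ice logic):

#include <linux/ptp_clock_kernel.h>

static int example_verify(struct ptp_clock_info *info, unsigned int pin,
			  enum ptp_pin_function func, unsigned int chan)
{
	/* reject functions this hypothetical hardware cannot provide */
	if (func == PTP_PF_PHYSYNC)
		return -EOPNOTSUPP;

	return 0;
}
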
diff --combined drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 02db3148240a,da1bec04efff..eae9aa9c0811
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@@ -745,7 -745,7 +745,7 @@@ static int mlx5_fw_tracer_set_mtrc_conf
MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
ilog2(TRACER_BUFFER_PAGE_NUM));
- MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey);
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MTRC_CONF, 0, 1);
@@@ -1028,7 -1028,7 +1028,7 @@@ int mlx5_fw_tracer_init(struct mlx5_fw_
err_notifier_unregister:
mlx5_eq_notifier_unregister(dev, &tracer->nb);
- mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(dev, tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
@@@ -1051,7 -1051,7 +1051,7 @@@ void mlx5_fw_tracer_cleanup(struct mlx5
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
- mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
}
@@@ -1069,7 -1069,6 +1069,6 @@@ void mlx5_fw_tracer_destroy(struct mlx5
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
- flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h
index 7761daa25f63,a3a4fece0cac..5b023951136d
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@@ -220,8 -220,6 +220,6 @@@ struct mlx5e_umr_wqe
struct mlx5_mtt inline_mtts[0];
};
- extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
@@@ -253,6 -251,9 +251,9 @@@ struct mlx5e_params
u16 mode;
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ struct {
+ struct mlx5e_mqprio_rl *rl;
+ } channel;
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
@@@ -665,7 -666,7 +666,7 @@@ struct mlx5e_rq
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey umr_mkey;
+ u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@@ -879,6 -880,7 +880,7 @@@ struct mlx5e_priv
#endif
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb;
+ struct mlx5e_mqprio_rl *mqprio_rl;
};
struct mlx5e_rx_handlers {
@@@ -918,6 -920,7 +920,7 @@@ void mlx5e_fold_sw_stats64(struct mlx5e
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
+ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
@@@ -1003,7 -1006,8 +1006,8 @@@ int mlx5e_modify_sq(struct mlx5_core_de
struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fce9401ac74,f3dec58026d9..c4f3d648a7b6
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@@ -234,7 -234,8 +234,7 @@@ static int mlx5e_rq_alloc_mpwqe_info(st
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift,
- struct mlx5_core_mkey *umr_mkey,
+ u64 npages, u8 page_shift, u32 *umr_mkey,
dma_addr_t filler_addr)
{
struct mlx5_mtt *mtt;
@@@ -454,7 -455,7 +454,7 @@@ static int mlx5e_alloc_rq(struct mlx5e_
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+ rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
@@@ -486,7 -487,7 +486,7 @@@
if (err)
goto err_rq_frags;
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@@ -573,7 -574,7 +573,7 @@@ err_free_by_rq_type
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
@@@ -606,7 -607,7 +606,7 @@@ static void mlx5e_free_rq(struct mlx5e_
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@@ -929,9 -930,10 +929,10 @@@ static int mlx5e_alloc_xdpsq_fifo(struc
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
@@@ -945,10 -947,11 +946,11 @@@
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
@@@ -1297,7 -1300,8 +1299,8 @@@ static int mlx5e_set_sq_maxrate(struct
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@@ -1307,10 -1311,7 +1310,7 @@@
if (err)
return err;
- if (qos_queue_group_id)
- sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
- else
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
@@@ -1704,6 -1705,36 +1704,36 @@@ static void mlx5e_close_tx_cqs(struct m
mlx5e_close_cq(&c->sq[tc].cq);
}
+ static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+ {
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+ return tc;
+
+ WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+ return -ENOENT;
+ }
+
+ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+ u32 *hw_id)
+ {
+ int tc;
+
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+ !params->mqprio.channel.rl) {
+ *hw_id = 0;
+ return 0;
+ }
+
+ tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+ if (tc < 0)
+ return tc;
+
+ return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+ }
+
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@@ -1712,9 -1743,16 +1742,16 @@@
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
+ u32 qos_queue_group_id;
+
+ err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+ if (err)
+ goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+ params, &cparam->txq_sq, &c->sq[tc], tc,
+ qos_queue_group_id,
+ &c->priv->channel_stats[c->ix].sq[tc]);
if (err)
goto err_close_sqs;
}
@@@ -1990,7 -2028,7 +2027,7 @@@ static int mlx5e_open_channel(struct ml
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
@@@ -2339,6 -2377,13 +2376,13 @@@ static int mlx5e_update_netdev_queues(s
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
+ if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+ }
return 0;
@@@ -2900,15 -2945,18 +2944,18 @@@ static void mlx5e_params_mqprio_dcb_set
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
+ params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt *qopt,
+ struct mlx5e_mqprio_rl *rl)
{
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = qopt->num_tc;
+ params->mqprio.channel.rl = rl;
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
}
@@@ -2968,9 -3016,13 +3015,13 @@@ static int mlx5e_mqprio_channel_validat
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
+
if (mqprio->max_rate[i]) {
- netdev_err(netdev, "Max tx rate is not supported\n");
- return -EINVAL;
+ int err;
+
+ err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+ if (err)
+ return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
@@@ -2989,11 -3041,22 +3040,22 @@@
return 0;
}
+ static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+ {
+ int tc;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+ if (mqprio->max_rate[tc])
+ return true;
+ return false;
+ }
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ struct mlx5e_mqprio_rl *rl;
bool nch_changed;
int err;
@@@ -3001,13 -3064,32 +3063,32 @@@
if (err)
return err;
+ rl = NULL;
+ if (mlx5e_mqprio_rate_limit(mqprio)) {
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return -ENOMEM;
+ err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+ mqprio->max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return err;
+ }
+ }
+
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
- return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ if (err && rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+
+ return err;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@@ -3225,7 -3307,7 +3306,7 @@@ static int mlx5e_set_mac(struct net_dev
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
@@@ -4350,13 -4432,17 +4431,17 @@@ void mlx5e_build_nic_params(struct mlx5
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
- mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
- if (is_zero_ether_addr(netdev->dev_addr) &&
+ mlx5_query_mac_address(priv->mdev, addr);
+ if (is_zero_ether_addr(addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+ return;
}
+
+ eth_hw_addr_set(netdev, addr);
}
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
@@@ -4861,6 -4947,11 +4946,11 @@@ void mlx5e_priv_cleanup(struct mlx5e_pr
kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
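
The mqprio changes above follow a construct-or-unwind ownership pattern: the
rate-limit object is allocated and initialized before the channel switch, handed
over on success (and released later from mlx5e_priv_cleanup()), and torn down by
the caller only when the switch fails. A generic sketch of that shape (the obj_*
helpers and commit_new_params() are hypothetical, not mlx5 API):

#include <linux/errno.h>

struct obj;
extern struct obj *obj_alloc(void);
extern int obj_init(struct obj *o);
extern void obj_cleanup(struct obj *o);
extern void obj_free(struct obj *o);
extern int commit_new_params(struct obj *o);	/* takes ownership on success */

static int example_switch(void)
{
	struct obj *o;
	int err;

	o = obj_alloc();
	if (!o)
		return -ENOMEM;

	err = obj_init(o);
	if (err) {
		obj_free(o);
		return err;
	}

	err = commit_new_params(o);
	if (err) {
		obj_cleanup(o);
		obj_free(o);
	}
	return err;
}
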
diff --combined drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 71a08f84d49d,873efde0d458..386ab9a2d490
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@@ -99,9 -99,6 +99,9 @@@
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
+#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
+
#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
@@@ -209,63 -206,34 +209,63 @@@ static struct init_tree_node egress_roo
}
};
-#define RDMA_RX_BYPASS_PRIO 0
-#define RDMA_RX_KERNEL_PRIO 1
+enum {
+ RDMA_RX_COUNTERS_PRIO,
+ RDMA_RX_BYPASS_PRIO,
+ RDMA_RX_KERNEL_PRIO,
+};
+
+#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
+#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
+ [RDMA_RX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
+ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
[RDMA_RX_BYPASS_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
[RDMA_RX_KERNEL_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
ADD_MULTIPLE_PRIO(1, 1))),
}
};
+enum {
+ RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_BYPASS_PRIO,
+};
+
+#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
+#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 1,
+ .ar_size = 2,
.children = (struct init_tree_node[]) {
- ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ [RDMA_TX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
+ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_BYPASS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
- ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
BY_PASS_PRIO_NUM_LEVELS))),
}
};
@@@ -2223,6 -2191,10 +2223,10 @@@ struct mlx5_flow_namespace *mlx5_get_fl
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@@ -2247,12 -2219,6 +2251,12 @@@
prio = RDMA_RX_KERNEL_PRIO;
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
root_ns = steering->rdma_tx_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_COUNTERS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_COUNTERS_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@@ -2634,6 -2600,7 +2638,7 @@@ void mlx5_cleanup_fs(struct mlx5_core_d
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@@@ -2672,6 -2639,21 +2677,21 @@@ static int init_sniffer_rx_root_ns(stru
return PTR_ERR_OR_ZERO(prio);
}
+ #define PORT_SEL_NUM_LEVELS 3
+ static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+ {
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+ }
+
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@@ -3058,6 -3040,12 +3078,12 @@@ int mlx5_init_fs(struct mlx5_core_dev *
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@@@ -3262,6 -3250,52 +3288,52 @@@ void mlx5_packet_reformat_dealloc(struc
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+ int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
+ {
+ return definer->id;
+ }
+
+ struct mlx5_flow_definer *
+ mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask)
+ {
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_definer *definer;
+ int id;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return ERR_PTR(-ENOMEM);
+
+ definer->ns_type = ns_type;
+ id = root->cmds->create_match_definer(root, format_id, match_mask);
+ if (id < 0) {
+ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
+ kfree(definer);
+ return ERR_PTR(id);
+ }
+ definer->id = id;
+ return definer;
+ }
+
+ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer)
+ {
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, definer->ns_type);
+ if (WARN_ON(!root))
+ return;
+
+ root->cmds->destroy_match_definer(root, definer->id);
+ kfree(definer);
+ }
+
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
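
The new match-definer API above is a plain create/use/destroy pair around a
firmware object id. A hedged usage sketch, assuming the caller already has a
device handle, a format_id and a match_mask buffer (example_definer() itself is
hypothetical):

#include <linux/err.h>

static int example_definer(struct mlx5_core_dev *dev, u16 format_id,
			   u32 *match_mask)
{
	struct mlx5_flow_definer *definer;
	int id;

	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					    format_id, match_mask);
	if (IS_ERR(definer))
		return PTR_ERR(definer);

	id = mlx5_get_match_definer_id(definer);
	/* ... program steering objects that reference id ... */

	mlx5_destroy_match_definer(dev, definer);
	return 0;
}
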
diff --combined drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 6c67185c05c1,73fed94af09a..3f47d2b3b6e6
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@@ -4,7 -4,7 +4,7 @@@
#ifndef _DR_TYPES_
#define _DR_TYPES_
- #include <linux/mlx5/driver.h>
+ #include <linux/mlx5/vport.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
@@@ -14,7 -14,6 +14,6 @@@
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
- #define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@@ -752,9 -751,9 +751,9 @@@ struct mlx5dr_esw_caps
struct mlx5dr_cmd_vport_cap {
u16 vport_gvmi;
u16 vhca_gvmi;
+ u16 num;
u64 icm_address_rx;
u64 icm_address_tx;
- u32 num;
};
struct mlx5dr_roce_cap {
@@@ -763,6 -762,11 +762,11 @@@
u8 fl_rc_qp_when_roce_enabled:1;
};
+ struct mlx5dr_vports {
+ struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct xarray vports_caps_xa;
+ };
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@@ -786,7 -790,6 +790,6 @@@
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
- u8 num_esw_ports;
u8 sw_format_ver;
bool eswitch_manager;
bool rx_sw_owner;
@@@ -795,11 -798,11 +798,11 @@@
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
- u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
- struct mlx5dr_cmd_vport_cap *vports_caps;
+ struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@@ -826,10 -829,6 +829,6 @@@ struct mlx5dr_domain_info
struct mlx5dr_cmd_caps caps;
};
- struct mlx5dr_domain_cache {
- struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
- };
-
struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn;
struct mlx5_core_dev *mdev;
@@@ -841,7 -840,7 +840,7 @@@
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
- struct mlx5dr_domain_cache cache;
+ struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
};
@@@ -942,7 -941,7 +941,7 @@@ struct mlx5dr_action_dest_tbl
struct mlx5dr_action_ctr {
u32 ctr_id;
- u32 offeset;
+ u32 offset;
};
struct mlx5dr_action_vport {
@@@ -1102,18 -1101,8 +1101,8 @@@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_
return true;
}
- static inline struct mlx5dr_cmd_vport_cap *
- mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
- {
- if (!caps->vports_caps ||
- (vport >= caps->num_vports && vport != WIRE_PORT))
- return NULL;
-
- if (vport == WIRE_PORT)
- vport = caps->num_vports;
-
- return &caps->vports_caps[vport];
- }
+ struct mlx5dr_cmd_vport_cap *
+ mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
struct mlx5dr_cmd_query_flow_table_details {
u8 status;
@@@ -1154,7 -1143,7 +1143,7 @@@ int mlx5dr_cmd_set_fte_modify_and_vport
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id);
+ u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id);
@@@ -1275,7 -1264,7 +1264,7 @@@ struct mlx5dr_cq
struct mlx5dr_mr {
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
dma_addr_t dma_addr;
void *addr;
size_t size;
@@@ -1372,12 -1361,12 +1361,12 @@@ struct mlx5dr_fw_recalc_cs_ft
};
struct mlx5dr_fw_recalc_cs_ft *
- mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
- int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr);
+ int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
diff --combined drivers/net/ethernet/microchip/lan743x_main.c
index c4f32c7e0db1,03d02403c19e..9d1091237567
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@@ -816,7 -816,7 +816,7 @@@ static int lan743x_mac_init(struct lan7
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@@ -1743,16 -1743,6 +1743,16 @@@ static int lan743x_tx_ring_init(struct
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
@@@ -2286,16 -2276,6 +2286,16 @@@ static int lan743x_rx_ring_init(struct
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@@@ -2665,7 -2645,7 +2665,7 @@@ static int lan743x_netdev_set_mac_addre
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
@@@ -3039,8 -3019,6 +3039,8 @@@ static int lan743x_pm_resume(struct dev
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
}
/* open netdev when netdev is at running state while resume.
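
The DMA-mask hunks in lan743x use the standard fallback idiom: try a 64-bit
streaming/coherent mask first and drop to 32-bit before allocating the descriptor
rings. A generic sketch of that idiom (example_set_dma_mask() is illustrative):

#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	/* fall back for platforms without 64-bit addressing */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
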
diff --combined drivers/net/ethernet/nxp/lpc_eth.c
index c910fa2f40a4,a63cc295b979..bc39558fe82b
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@@ -419,7 -419,7 +419,7 @@@ struct netdata_local
/*
* MAC support functions
*/
- static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+ static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@@ -1015,6 -1015,9 +1015,6 @@@ static int lpc_eth_close(struct net_dev
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
-
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@@@ -1022,8 -1025,6 +1022,8 @@@
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;
@@@ -1092,7 -1093,7 +1092,7 @@@ static int lpc_set_mac_address(struct n
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@@ -1231,6 -1232,7 +1231,7 @@@ static int lpc_eth_drv_probe(struct pla
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@@ -1346,10 -1348,11 +1347,11 @@@
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
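
Many hunks in this merge perform the same mechanical conversion: direct writes
into netdev->dev_addr (memcpy(), ether_addr_copy(), or filling it in place) become
eth_hw_addr_set() calls, in preparation for dev_addr turning const. The pattern,
sketched (example_set_addr() is illustrative):

#include <linux/etherdevice.h>

static void example_set_addr(struct net_device *ndev, const u8 *mac)
{
	/* old style: memcpy(ndev->dev_addr, mac, ETH_ALEN); */
	eth_hw_addr_set(ndev, mac);	/* copies ETH_ALEN bytes via the helper */
}
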
diff --combined drivers/net/ethernet/smsc/smc91x.c
index e4fc6484faa8,a31c159e96ea..96a9400b5737
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@@ -1851,6 -1851,7 +1851,7 @@@ static int smc_probe(struct net_device
int retval;
unsigned int val, revision_register;
const char *version_string;
+ u8 addr[ETH_ALEN];
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@@ -1922,7 -1923,8 +1923,8 @@@
/* Get the MAC address */
SMC_SELECT_BANK(lp, 1);
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc_reset(dev);
@@@ -2190,7 -2192,6 +2192,7 @@@ static const struct of_device_id smc91x
};
MODULE_DEVICE_TABLE(of, smc91x_match);
+#if defined(CONFIG_GPIOLIB)
/**
* try_toggle_control_gpio - configure a gpio if it exists
* @dev: net device
@@@ -2221,15 -2222,6 +2223,15 @@@ static int try_toggle_control_gpio(stru
return 0;
}
+#else
+static int try_toggle_control_gpio(struct device *dev,
+ struct gpio_desc **desc,
+ const char *name, int index,
+ int value, unsigned int nsdelay)
+{
+ return 0;
+}
+#endif
#endif
/*
diff --combined drivers/net/usb/lan78xx.c
index 63cd72c5f580,03319fdb5235..f20376c1ef3f
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@@ -1817,7 -1817,7 +1817,7 @@@ static void lan78xx_init_mac_address(st
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr);
+ eth_hw_addr_set(dev->net, addr);
}
/* MDIO read and write wrappers for phylib */
@@@ -2416,7 -2416,7 +2416,7 @@@ static int lan78xx_set_mac_addr(struct
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] |
netdev->dev_addr[1] << 8 |
@@@ -4122,12 -4122,6 +4122,12 @@@ static int lan78xx_probe(struct usb_int
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */
+ if (dev->maxpacket == 0) {
+ ret = -ENODEV;
+ goto out4;
+ }
+
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
diff --combined drivers/net/xen-netfront.c
index fc41ba95f81d,57437e4b8a94..911f43986a8c
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@@ -1730,10 -1730,6 +1730,10 @@@ static int netfront_resume(struct xenbu
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev);
+ netif_device_detach(info->netdev);
+ netif_tx_unlock_bh(info->netdev);
+
xennet_disconnect_backend(info);
return 0;
}
@@@ -2161,6 -2157,7 +2161,7 @@@ static int talk_to_netback(struct xenbu
unsigned int max_queues = 0;
struct netfront_queue *queue = NULL;
unsigned int num_queues = 1;
+ u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@@ -2174,11 -2171,12 +2175,12 @@@
"feature-split-event-channels", 0);
/* Read mac addr. */
- err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ err = xen_net_read_mac(dev, addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
goto out_unlocked;
}
+ eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
"feature-xdp-headroom", 0);
@@@ -2353,10 -2351,6 +2355,10 @@@ static int xennet_connect(struct net_de
* domain a kick because we've probably just requeued some
* packets.
*/
+ netif_tx_lock_bh(np->netdev);
+ netif_device_attach(np->netdev);
+ netif_tx_unlock_bh(np->netdev);
+
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
diff --combined drivers/soc/fsl/dpio/dpio-service.c
index 4d119e98ce7c,3fd0d0840287..1d2b27e3ea63
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@@ -12,6 -12,7 +12,7 @@@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+ #include <linux/dim.h>
#include <linux/slab.h>
#include "dpio.h"
@@@ -28,6 -29,14 +29,14 @@@ struct dpaa2_io
spinlock_t lock_notifications;
struct list_head notifications;
struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
};
struct dpaa2_io_store {
@@@ -59,7 -68,7 +68,7 @@@ static inline struct dpaa2_io *service_
* potentially being migrated away.
*/
if (cpu < 0)
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
return dpio_by_cpu[cpu];
@@@ -100,6 -109,17 +109,17 @@@ struct dpaa2_io *dpaa2_io_service_selec
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+ static void dpaa2_io_dim_work(struct work_struct *w)
+ {
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+ }
+
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
@@@ -114,6 -134,7 +134,7 @@@ struct dpaa2_io *dpaa2_io_create(const
struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
if (!obj)
return NULL;
@@@ -127,7 -148,15 +148,15 @@@
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+ /* Compute how many 256 QBMAN cycles fit into one ns. This is because
+ * the interrupt timeout period register needs to be specified in QBMAN
+ * clock cycles in increments of 256.
+ */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
@@@ -138,6 -167,7 +167,7 @@@
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
@@@ -155,6 -185,12 +185,12 @@@
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
return obj;
}
@@@ -194,6 -230,8 +230,8 @@@ irqreturn_t dpaa2_io_irq(struct dpaa2_i
struct qbman_swp *swp;
u32 status;
+ obj->event_ctr++;
+
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
@@@ -462,7 -500,7 +500,7 @@@ int dpaa2_io_service_enqueue_multiple_f
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
@@@ -779,3 -817,82 +817,82 @@@ int dpaa2_io_query_bp_count(struct dpaa
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+ /**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+ int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+ {
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+ }
+ EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+ /**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+ void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+ {
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+ }
+ EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+ /**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+ void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+ {
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+ }
+ EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+ /**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+ int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+ {
+ return d->swp->use_adaptive_rx_coalesce;
+ }
+ EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+ /**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+ void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+ {
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+ }
+ EXPORT_SYMBOL(dpaa2_io_update_net_dim);
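
dpaa2_io_update_net_dim() is intended to be fed from the consumer's
dequeue path: the DPIO object accumulates frame and byte counts under
dim_lock, and the generic Net DIM algorithm schedules dpaa2_io_dim_work()
whenever it picks a new moderation profile. A rough sketch of a NAPI poll
handler driving it, with my_channel and its dpio back-pointer as
hypothetical names:

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct my_channel *ch = container_of(napi, struct my_channel, napi);
		u64 frames = 0, bytes = 0;

		/* ... dequeue up to budget frames, counting frames/bytes ... */

		/* no-op unless adaptive coalescing was enabled on the DPIO */
		dpaa2_io_update_net_dim(ch->dpio, frames, bytes);

		return 0;
	}
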
diff --combined drivers/soc/fsl/dpio/qbman-portal.c
index c4282cc7b391,3474bf5f88d5..1397ec21873a
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@@ -29,6 -29,7 +29,7 @@@
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
+ #define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
@@@ -38,6 -39,7 +39,7 @@@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+ #define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@@ -355,6 -357,9 +357,9 @@@ struct qbman_swp *qbman_swp_init(const
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with an irq timeout period of 0us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
return p;
}
@@@ -674,9 -679,9 +679,9 @@@ int qbman_swp_enqueue_multiple_direct(s
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -688,9 -693,9 +693,9 @@@
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@@ -732,7 -737,8 +737,7 @@@ int qbman_swp_enqueue_multiple_mem_back
int i, num_enqueued = 0;
unsigned long irq_flags;
- spin_lock(&s->access_spinlock);
- local_irq_save(irq_flags);
+ spin_lock_irqsave(&s->access_spinlock, irq_flags);
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
@@@ -743,7 -749,8 +748,7 @@@
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available) {
- local_irq_restore(irq_flags);
- spin_unlock(&s->access_spinlock);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
return 0;
}
}
@@@ -756,9 -763,9 +761,9 @@@
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -768,9 -775,9 +773,9 @@@
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@@ -782,7 -789,8 +787,7 @@@
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
- local_irq_restore(irq_flags);
- spin_unlock(&s->access_spinlock);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
return num_enqueued;
}
@@@ -829,9 -837,9 +834,9 @@@ int qbman_swp_enqueue_multiple_desc_dir
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -899,9 -907,9 +904,9 @@@ int qbman_swp_enqueue_multiple_desc_mem
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
- &fd[i], sizeof(*fd));
+ memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
eqcr_pi++;
}
@@@ -1793,3 -1801,56 +1798,56 @@@ u32 qbman_bp_info_num_free_bufs(struct
{
return le32_to_cpu(a->fill);
}
+
+ /**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+ int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+ {
+ u32 itp, max_holdoff;
+
+ /* Convert the irq_holdoff value from usecs to increments of 256 QBMAN
+ * clock cycles. This depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+ }
+
+ /**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+ void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+ {
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+ }
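
To make the unit conversion in qbman_swp_set_irq_coalescing() concrete:
assuming a 500 MHz QBMAN clock, qman_256_cycles_per_ns works out to
256000 / 500 = 512, i.e. 256 cycles take 512 ns. A 1000 us holdoff then
programs ITP = 1000000 / 512 = 1953, and the 4096 register ceiling caps
the holdoff at (512 * 4096) / 1000 = 2097 us. The same arithmetic as a
standalone sketch (the 500 MHz figure is an assumption, not a datasheet
value):

	#include <stdio.h>

	int main(void)
	{
		unsigned int qman_clk = 500000000;	/* assumed 500 MHz */
		/* ns taken by 256 cycles, as computed in dpaa2_io_create() */
		unsigned int ns_per_256 = 256000 / (qman_clk / 1000000);
		unsigned int holdoff_us = 1000;
		unsigned int itp = (holdoff_us * 1000) / ns_per_256;
		unsigned int max_us = (ns_per_256 * 4096) / 1000;

		printf("itp=%u, max holdoff=%u us\n", itp, max_us);
		return 0;	/* prints itp=1953, max holdoff=2097 us */
	}
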
diff --combined include/linux/bpf.h
index 3db6f6c95489,1c7fd7c4c6d3..e6f5579f9356
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@@ -48,6 -48,7 +48,7 @@@ extern struct idr btf_idr
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
+ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
@@@ -142,7 -143,8 +143,8 @@@ struct bpf_map_ops
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
- int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ int (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */
@@@ -929,11 -931,8 +931,11 @@@ struct bpf_array_aux
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
- enum bpf_prog_type type;
- bool jited;
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ } owner;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@@ -1092,6 -1091,7 +1094,7 @@@ bool bpf_prog_array_compatible(struct b
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@@ -2220,6 -2220,8 +2223,8 @@@ int bpf_arch_text_poke(void *ip, enum b
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+ #define MAX_BPRINTF_VARARGS 12
+
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
diff --combined include/linux/filter.h
index ef03ff34234d,47f80adbe744..8231a6a257f6
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@@ -360,10 -360,9 +360,9 @@@ static inline bool insn_is_zext(const s
.off = 0, \
.imm = TGT })
- /* Function call */
+ /* Convert function address to BPF immediate */
- #define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+ #define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@@ -371,7 -370,7 +370,7 @@@
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
@@@ -1051,7 -1050,6 +1050,7 @@@ extern int bpf_jit_enable
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
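
BPF_CALL_IMM() exists because a call instruction's immediate is only 32
bits wide: the helper address is stored relative to __bpf_call_base and
the interpreter adds the base back before calling. A simplified fragment
of the round trip (not the exact interpreter code):

	/* encoding: what BPF_EMIT_CALL(my_helper) puts in the insn */
	insn.imm = BPF_CALL_IMM(my_helper);	/* my_helper - __bpf_call_base */

	/* decoding: roughly what BPF_JMP | BPF_CALL does at run time */
	r0 = (__bpf_call_base + insn.imm)(r1, r2, r3, r4, r5);
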
diff --combined include/linux/mlx5/device.h
index ed0230ff9422,f8a0bbb42c3b..ed0254b01b71
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@@ -541,19 -541,21 +541,21 @@@ struct mlx5_cmd_layout
u8 status_own;
};
- enum mlx5_fatal_assert_bit_offsets {
- MLX5_RFR_OFFSET = 31,
+ enum mlx5_rfr_severity_bit_offsets {
+ MLX5_RFR_BIT_OFFSET = 0x7,
};
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rfr;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
@@@ -577,7 -579,9 +579,9 @@@ struct mlx5_init_seg
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
@@@ -1183,6 -1187,7 +1187,7 @@@ enum mlx5_cap_type
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@@ -1340,6 -1345,20 +1345,20 @@@ enum mlx5_qcam_feature_groups
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+ #define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+ #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+ #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+ #define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
@@@ -1456,8 -1475,6 +1475,8 @@@ static inline u16 mlx5_to_sw_pkey_sz(in
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
diff --combined include/linux/mlx5/driver.h
index 5cc19b9483b9,f617dfbcd9fd..a623ec635947
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@@ -59,15 -59,13 +59,13 @@@
#define MLX5_ADEV_NAME "mlx5_core"
+ #define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@@ -136,6 -134,7 +134,7 @@@ enum
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
@@@ -154,6 -153,7 +153,7 @@@
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
};
enum mlx5_qpts_trust_state {
@@@ -357,6 -357,22 +357,6 @@@ struct mlx5_core_sig_ctx
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
- MLX5_MKEY_INDIRECT_DEVX,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
- struct wait_queue_head wait;
- refcount_t usecount;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@@ -425,6 -441,7 +425,7 @@@ struct mlx5_core_health
struct work_struct report_work;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
struct mlx5_qp_table {
@@@ -637,7 -654,7 +638,7 @@@ struct mlx5e_resources
struct mlx5e_hw_objs {
u32 pdn;
struct mlx5_td td;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5_sq_bfreg bfreg;
} hw_objs;
struct devlink_port dl_port;
@@@ -736,6 -753,7 +737,7 @@@ struct mlx5_core_dev
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@@ -989,6 -1007,8 +991,6 @@@ void mlx5_cmd_mbox_status(void *out, u
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
@@@ -1006,11 -1026,13 +1008,11 @@@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
@@@ -1222,6 -1244,16 +1224,16 @@@ static inline int mlx5_core_native_port
return MLX5_CAP_GEN(dev, native_port_num);
}
+ static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+ {
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+ }
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
@@@ -1230,11 -1262,12 +1242,12 @@@ static inline bool mlx5_is_roce_init_en
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
+ int err;
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
}
#endif /* MLX5_DRIVER_H */
diff --combined include/linux/mlx5/fs.h
index f2c3da2006d9,7a43fec63a35..90c548aa6389
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@@ -83,8 -83,7 +83,9 @@@ enum mlx5_flow_namespace_type
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
};
enum {
@@@ -99,6 -98,7 +100,7 @@@
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+ struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@@ -259,6 -259,13 +261,13 @@@ struct mlx5_modify_hdr *mlx5_modify_hea
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+ struct mlx5_flow_definer *
+ mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+ int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
struct mlx5_pkt_reformat_params {
int type;
diff --combined include/linux/mlx5/mlx5_ifc.h
index 9011c4495867,746381eccccf..53483c457093
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@@ -94,6 -94,7 +94,7 @@@ enum
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@@ -342,7 -343,7 +343,7 @@@ struct mlx5_ifc_flow_table_fields_suppo
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
- u8 reserved_at_1e[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@@ -393,14 -394,6 +394,14 @@@
u8 metadata_reg_c_0[0x1];
};
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 reserved_at_0[0xe];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x11];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 reserved_at_1[0x1];
@@@ -547,7 -540,7 +548,7 @@@ struct mlx5_ifc_fte_match_set_misc_bit
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7];
@@@ -764,15 -757,7 +765,15 @@@ struct mlx5_ifc_flow_table_nic_cap_bit
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200];
+ u8 reserved_at_e00[0x700];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@@ -783,6 -768,18 +784,18 @@@
u8 reserved_at_20c0[0x5f40];
};
+ struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+ };
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@@ -1322,7 -1319,8 +1335,8 @@@ struct mlx5_ifc_cmd_hca_cap_bits
u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1];
- u8 reserved_at_21[0x2];
+ u8 reserved_at_21[0x1];
+ u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@@@ -1530,7 -1528,8 +1544,8 @@@
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_248[0x2];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_248[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
@@@ -1603,7 -1602,8 +1618,8 @@@
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 reserved_at_3a2[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@@ -1732,7 -1732,7 +1748,7 @@@
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4];
@@@ -1747,7 -1747,7 +1763,7 @@@
u8 reserved_at_760[0x20];
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
@@@ -1766,6 -1766,7 +1782,7 @@@ enum mlx5_flow_destination_type
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
@@@ -2823,6 -2824,40 +2840,40 @@@ struct mlx5_ifc_dropped_packet_logged_b
u8 reserved_at_0[0xe0];
};
+ struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+ };
+
+ struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ u8 reserved_at_1c0[0x40];
+ };
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@@ -3144,6 -3179,7 +3195,7 @@@ union mlx5_ifc_hca_cap_union_bits
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
@@@ -4113,13 -4149,19 +4165,19 @@@ struct mlx5_ifc_health_buffer_bits
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@@ -5632,6 -5674,236 +5690,236 @@@ struct mlx5_ifc_query_fte_in_bits
u8 reserved_at_120[0xe0];
};
+ struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+ };
+
+ struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+ };
+
+ struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+ };
+
+ struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+ };
+
+ struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x160];
+
+ u8 match_mask[16][0x20];
+ };
+
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+ };
+
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+ };
+
+ struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+ };
+
+ struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ };
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@@ -7585,7 -7857,7 +7873,7 @@@ struct mlx5_ifc_dealloc_uar_out_bits
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@@ -8105,6 -8377,11 +8393,11 @@@ struct mlx5_ifc_create_flow_group_out_b
u8 reserved_at_60[0x20];
};
+ enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+ };
+
enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@@ -8126,7 -8403,9 +8419,9 @@@ struct mlx5_ifc_create_flow_group_in_bi
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@@ -8141,7 -8420,10 +8436,10 @@@
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@@ -8432,7 -8714,7 +8730,7 @@@ struct mlx5_ifc_alloc_uar_out_bits
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@@ -10076,6 -10358,17 +10374,17 @@@ struct mlx5_ifc_pddr_reg_bits
union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
};
+ struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+ };
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@@ -10137,6 -10430,7 +10446,7 @@@
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
u8 reserved_at_0[0x60e0];
};
@@@ -10414,9 -10708,16 +10724,16 @@@ struct mlx5_ifc_dcbx_param_bits
u8 reserved_at_a0[0x160];
};
+ enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT,
+ };
+
struct mlx5_ifc_lagc_bits {
u8 fdb_selection_mode[0x1];
- u8 reserved_at_1[0x1c];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
u8 reserved_at_20[0x14];
@@@ -10630,29 -10931,6 +10947,6 @@@ struct mlx5_ifc_dealloc_memic_out_bits
u8 reserved_at_40[0x40];
};
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
- };
-
- struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
- };
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
diff --combined include/net/cfg80211.h
index 27336fc70467,7c9d5db4f0e6..423f97b982ff
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@@ -739,6 -739,22 +739,22 @@@ struct cfg80211_tid_config
struct cfg80211_tid_cfg tid_conf[];
};
+ /**
+ * struct cfg80211_fils_aad - FILS AAD data
+ * @macaddr: STA MAC address
+ * @kek: FILS KEK
+ * @kek_len: FILS KEK length
+ * @snonce: STA Nonce
+ * @anonce: AP Nonce
+ */
+ struct cfg80211_fils_aad {
+ const u8 *macaddr;
+ const u8 *kek;
+ u8 kek_len;
+ const u8 *snonce;
+ const u8 *anonce;
+ };
+
/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
@@@ -1040,6 -1056,36 +1056,36 @@@ struct cfg80211_crypto_settings
enum nl80211_sae_pwe_mechanism sae_pwe;
};
+ /**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+ struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+ };
+
+ /**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elem.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+ struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+ };
+
/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
@@@ -1058,6 -1104,7 +1104,7 @@@
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@@ -1075,6 -1122,7 +1122,7 @@@ struct cfg80211_beacon_data
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@@ -1189,6 -1237,7 +1237,7 @@@ enum cfg80211_ap_settings_flags
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@@ -1221,6 -1270,7 +1270,7 @@@
struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
/**
@@@ -4018,6 -4068,10 +4068,10 @@@ struct mgmt_frame_regs
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@@ -4348,6 -4402,8 +4402,8 @@@
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
};
/*
@@@ -4981,6 -5037,13 +5037,13 @@@ struct wiphy_iftype_akm_suites
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
*/
struct wiphy {
struct mutex mtx;
@@@ -5125,6 -5188,9 +5188,9 @@@
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@@ -5376,6 -5442,7 +5442,6 @@@ static inline void wiphy_unlock(struct
* netdev and may otherwise be used by the driver read-only, will be updated
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers
@@@ -5422,6 -5489,7 +5488,6 @@@ struct wireless_dev
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
u8 mgmt_registrations_need_update:1;
struct mutex mtx;
@@@ -5490,7 -5558,7 +5556,7 @@@
unsigned long unprot_beacon_reported;
};
- static inline u8 *wdev_address(struct wireless_dev *wdev)
+ static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@@ -6308,6 -6376,17 +6374,17 @@@ static inline void cfg80211_gen_new_bss
u64_to_ether_addr(new_bssid_u64, new_bssid);
}
+ /**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
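
Since cfg80211_mbssid_elems ends in a counted flexible array, a driver or
the nl80211 parsing code can build it in a single allocation sized with
struct_size(). A hedged sketch, where mbssid_ie/mbssid_ie_len stand in
for a parsed element:

	struct cfg80211_mbssid_elems *elems;

	elems = kzalloc(struct_size(elems, elem, 1), GFP_KERNEL);
	if (!elems)
		return -ENOMEM;

	elems->cnt = 1;
	elems->elem[0].data = mbssid_ie;	/* placeholder IE buffer */
	elems->elem[0].len = mbssid_ie_len;
	beacon->mbssid_ies = elems;		/* struct cfg80211_beacon_data */
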
diff --combined include/net/sock.h
index 463f390d90b3,ff4e62aa62e5..db95ceaf4d4c
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@@ -259,10 -259,11 +259,11 @@@ struct bpf_local_storage
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@@ -270,6 -271,7 +271,7 @@@
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@@ -329,7 -331,6 +331,6 @@@
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@@ -394,7 -395,6 +395,6 @@@ struct sock
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@@ -413,6 -413,7 +413,7 @@@
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@@ -431,6 -432,9 +432,9 @@@
struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@@ -443,7 -447,6 +447,6 @@@
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@@ -1208,7 -1211,7 +1211,7 @@@ struct proto
#endif
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
@@@ -1237,7 -1240,7 +1240,7 @@@
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@@ -1518,20 -1521,49 +1521,49 @@@ sk_rmem_schedule(struct sock *sk, struc
skb_pfmemalloc(skb);
}
+ static inline int sk_unused_reserved_mem(const struct sock *sk)
+ {
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+ }
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+ }
+
+ static inline void sk_mem_reclaim_final(struct sock *sk)
+ {
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@@ -1543,9 -1575,12 +1575,12 @@@
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@@ -1554,22 -1589,14 +1589,14 @@@
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ if (unlikely(reclaimable >= 1 << 21))
__sk_mem_reclaim(sk, 1 << 20);
}
- DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
__kfree_skb(skb);
}
@@@ -1889,10 -1916,8 +1916,8 @@@ static inline void sk_rx_queue_set(stru
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
@@@ -1900,15 -1925,19 +1925,19 @@@
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
static inline int sk_rx_queue_get(const struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
#endif
return -1;
@@@ -2388,13 -2417,11 +2417,11 @@@ static inline void sk_stream_moderate_s
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
@@@ -2608,7 -2635,6 +2635,6 @@@ static inline void skb_setup_tx_timesta
&skb_shinfo(skb)->tskey);
}
- DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@@ -2620,12 -2646,6 +2646,6 @@@
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@@ -2820,10 -2840,8 +2840,15 @@@ void sock_set_sndtimeo(struct sock *sk
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+ int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+ int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
++
#endif /* _SOCK_H */
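
The sk_reserved_mem additions change the reclaim trigger from raw
sk_forward_alloc to the portion not covered by the reservation, so a
socket that reserved memory keeps its cushion across reclaim. Worked
numbers as a standalone sketch (values chosen for illustration only):

	#include <stdio.h>

	int main(void)
	{
		int reserved = 1 << 20;		/* sk_reserved_mem: 1 MiB  */
		int wmem_queued = 256 << 10;	/* sk_wmem_queued          */
		int rmem_alloc = 0;		/* sk_rmem_alloc           */
		int forward_alloc = 900 << 10;	/* sk_forward_alloc        */

		int unused = reserved - wmem_queued - rmem_alloc;
		int reclaimable = forward_alloc - (unused > 0 ? unused : 0);

		/* 135168 >= SK_MEM_QUANTUM (one page on most configs),
		 * so reclaim would run here */
		printf("unused=%d reclaimable=%d\n", unused, reclaimable);
		return 0;
	}
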
diff --combined include/net/tls.h
index 01d2e3744393,adab19a8aed7..07ff59e99cb3
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -66,7 -66,7 +66,7 @@@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
- /* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+ /* For CCM mode, the full 16 bytes of IV are made of 4 fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@@ -74,6 -74,7 +74,7 @@@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+ #define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
@@@ -220,6 -221,8 +221,8 @@@ union tls_crypto_context
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@@ -375,7 -378,7 +378,7 @@@ void tls_sw_release_resources_rx(struc
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
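
The TLS_SM4_CCM_IV_B0_BYTE definition reuses the AES-CCM value because
the b0 flags byte only encodes the size of the trailing length field
(3 bytes, hence 3 - 1 = 2), not the cipher. The 16-byte IV layout from
the comment above, as a sketch with salt/rec_seq as placeholder buffers:

	u8 iv[16];

	iv[0] = 2;			/* b0: L - 1 for a 3-byte length field */
	memcpy(&iv[1], salt, 4);	/* implicit nonce from key material */
	memcpy(&iv[5], rec_seq, 8);	/* explicit per-record nonce */
	/* iv[13..15]: 3-byte record length, filled in by the AEAD code */
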
diff --combined kernel/bpf/arraymap.c
index 447def540544,5e1ccfae916b..c7a5be3bf8be
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@@ -645,7 -645,7 +645,7 @@@ static const struct bpf_iter_seq_info i
.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};
- static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags)
{
u32 i, key, num_elems = 0;
@@@ -668,9 -668,8 +668,8 @@@
val = array->value + array->elem_size * i;
num_elems++;
key = i;
- ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
- (u64)(long)&key, (u64)(long)val,
- (u64)(long)callback_ctx, 0);
+ ret = callback_fn((u64)(long)map, (u64)(long)&key,
+ (u64)(long)val, (u64)(long)callback_ctx, 0);
/* return value: 0 - continue, 1 - stop and return */
if (ret)
break;
@@@ -1072,7 -1071,6 +1071,7 @@@ static struct bpf_map *prog_array_map_a
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex);
+ spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr);
if (IS_ERR(map)) {
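
The bpf_for_each_array_elem() change drops the BPF_CAST_CALL() cast in
favour of the typed bpf_callback_t, so the callback is invoked with the
plain five-u64 BPF calling convention. A sketch of the shape a callback
takes from the loop above (hypothetical, kernel-side view):

	static u64 example_cb(u64 map, u64 key, u64 value, u64 ctx, u64 unused)
	{
		u32 k = *(u32 *)(long)key;

		/* per the loop above: return 0 to continue, 1 to stop */
		return k >= 16;
	}
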
diff --combined kernel/bpf/core.c
index 6e3ae90ad107,ea8a468dbded..ded9163185d1
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@@ -524,7 -524,6 +524,7 @@@ int bpf_jit_enable __read_mostly = IS
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
+long bpf_jit_limit_max __read_mostly;
static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
@@@ -818,8 -817,7 +818,8 @@@ u64 __weak bpf_jit_alloc_exec_limit(voi
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
PAGE_SIZE), LONG_MAX);
return 0;
}
@@@ -1823,26 -1821,20 +1823,26 @@@ static unsigned int __bpf_prog_ret0_war
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
+ bool ret;
+
if (fp->kprobe_override)
return false;
- if (!array->aux->type) {
+ spin_lock(&array->aux->owner.lock);
+
+ if (!array->aux->owner.type) {
/* There's no owner yet where we could check for
* compatibility.
*/
- array->aux->type = fp->type;
- array->aux->jited = fp->jited;
- return true;
+ array->aux->owner.type = fp->type;
+ array->aux->owner.jited = fp->jited;
+ ret = true;
+ } else {
+ ret = array->aux->owner.type == fp->type &&
+ array->aux->owner.jited == fp->jited;
}
-
- return array->aux->type == fp->type &&
- array->aux->jited == fp->jited;
+ spin_unlock(&array->aux->owner.lock);
+ return ret;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@@ -2365,6 -2357,11 +2365,11 @@@ const struct bpf_func_proto * __weak bp
return NULL;
}
+ const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
+ {
+ return NULL;
+ }
+
u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 17687848daec,7242b32fff80..2ed9496fc41f
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -254,7 -254,7 +254,7 @@@ batadv_claim_hash_find(struct batadv_pr
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
- batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+ batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@@ -336,7 -336,7 +336,7 @@@ batadv_bla_del_backbone_claims(struct b
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
- static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
@@@ -488,7 -488,7 +488,7 @@@ static void batadv_bla_loopdetect_repor
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
- batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@@ -926,7 -926,7 +926,7 @@@ static bool batadv_handle_request(struc
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -964,7 -964,7 +964,7 @@@
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -1560,14 -1560,10 +1560,14 @@@ int batadv_bla_init(struct batadv_priv
return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128);
- bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.claim_hash)
+ return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.backbone_hash) {
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
+ }
batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
@@@ -2130,7 -2126,7 +2130,7 @@@ batadv_bla_claim_dump_entry(struct sk_b
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
@@@ -2298,7 -2294,7 +2298,7 @@@ batadv_bla_backbone_dump_entry(struct s
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
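
The batadv_bla_init() hunk above fixes an error path: with the combined
check, a failed backbone_hash allocation leaked the already-created
claim_hash. Reduced to its skeleton (names hypothetical), the unwind
pattern is:

	a = batadv_hash_new(128);
	if (!a)
		return -ENOMEM;

	b = batadv_hash_new(32);
	if (!b) {
		batadv_hash_destroy(a);	/* undo the earlier allocation */
		return -ENOMEM;
	}
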
diff --combined net/core/dev.c
index eb3a366bf212,4e3d19a06de4..a58b02e22d69
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -140,7 -140,7 +140,7 @@@
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
- #include <linux/netfilter_ingress.h>
+ #include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
@@@ -303,6 -303,12 +303,12 @@@ static struct netdev_name_node *netdev_
return NULL;
}
+ bool netdev_name_in_use(struct net *net, const char *name)
+ {
+ return netdev_name_node_lookup(net, name);
+ }
+ EXPORT_SYMBOL(netdev_name_in_use);
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@@ -1133,7 -1139,7 +1139,7 @@@ static int __dev_alloc_name(struct net
}
snprintf(buf, IFNAMSIZ, name, i);
- if (!__dev_get_by_name(net, buf))
+ if (!netdev_name_in_use(net, buf))
return i;
/* It is possible to run out of possible slots
@@@ -1187,7 -1193,7 +1193,7 @@@ static int dev_get_valid_name(struct ne
if (strchr(name, '%'))
return dev_alloc_name_ns(net, dev, name);
- else if (__dev_get_by_name(net, name))
+ else if (netdev_name_in_use(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
@@@ -1290,8 -1296,8 +1296,8 @@@ rollback
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
- pr_err("%s: name change rollback failed: %d\n",
- dev->name, ret);
+ netdev_err(dev, "name change rollback failed: %d\n",
+ ret);
}
}
@@@ -2345,7 -2351,7 +2351,7 @@@ static void netif_setup_tc(struct net_d
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
+ netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
@@@ -2356,8 -2362,8 +2362,8 @@@
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
- i, q);
+ netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+ i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
@@@ -2921,6 -2927,8 +2927,8 @@@ int netif_set_real_num_tx_queues(struc
if (dev->num_tc)
netif_setup_tc(dev, txq);
+ dev_qdisc_change_real_num_tx(dev, txq);
+
dev->real_num_tx_queues = txq;
if (disabling) {
@@@ -3163,12 -3171,6 +3171,12 @@@ static u16 skb_tx_hash(const struct net
qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
+ if (unlikely(!qcount)) {
+ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
+ sb_dev->name, qoffset, tc);
+ qoffset = 0;
+ qcount = dev->real_num_tx_queues;
+ }
}
if (skb_rx_queue_recorded(skb)) {
@@@ -3414,7 -3416,7 +3422,7 @@@ EXPORT_SYMBOL(__skb_gso_segment)
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
- pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ netdev_err(dev, "hw csum failure\n");
skb_dump(KERN_ERR, skb, true);
dump_stack();
}
@@@ -3912,8 -3914,7 +3920,8 @@@ int dev_loopback_xmit(struct net *net,
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->pkt_type = PACKET_LOOPBACK;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(skb));
skb_dst_force(skb);
netif_rx_ni(skb);
@@@ -3925,6 -3926,7 +3933,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit)
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
+ #ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
struct tcf_result cl_res;
@@@ -3960,6 -3962,7 +3969,7 @@@
default:
break;
}
+ #endif /* CONFIG_NET_CLS_ACT */
return skb;
}
@@@ -4153,13 -4156,20 +4163,20 @@@ static int __dev_queue_xmit(struct sk_b
qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_at_ingress = 0;
- # ifdef CONFIG_NET_EGRESS
+ #endif
+ #ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
+ if (nf_hook_egress_active()) {
+ skb = nf_hook_egress(skb, &rc, dev);
+ if (!skb)
+ goto out;
+ }
+ nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
}
- # endif
#endif
/* If device/qdisc don't need skb->dst, release it right now while
* its hot in this cpu cache.
@@@ -5301,6 -5311,7 +5318,7 @@@ skip_taps
if (static_branch_unlikely(&ingress_needed_key)) {
bool another = false;
+ nf_skip_egress(skb, true);
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
&another);
if (another)
@@@ -5308,6 -5319,7 +5326,7 @@@
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
goto out;
}
@@@ -5844,7 -5856,7 +5863,7 @@@ static void gro_normal_one(struct napi_
gro_normal_list(napi);
}
- static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+ static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct packet_offload *ptype;
__be16 type = skb->protocol;
@@@ -5873,12 -5885,11 +5892,11 @@@
if (err) {
WARN_ON(&ptype->list == head);
kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return;
}
out:
gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
- return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@@ -6905,19 -6916,25 +6923,25 @@@ EXPORT_SYMBOL(netif_napi_add)
void napi_disable(struct napi_struct *n)
{
+ unsigned long val, new;
+
might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
- msleep(1);
+ do {
+ val = READ_ONCE(n->state);
+ if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
+ usleep_range(20, 200);
+ continue;
+ }
+
+ new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
+ new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
+ } while (cmpxchg(&n->state, val, new) != val);
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
clear_bit(NAPI_STATE_DISABLE, &n->state);
- clear_bit(NAPI_STATE_THREADED, &n->state);
}
EXPORT_SYMBOL(napi_disable);
@@@ -6995,8 -7012,8 +7019,8 @@@ static int __napi_poll(struct napi_stru
}
if (unlikely(work > weight))
- pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
- n->poll, work, weight);
+ netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+ n->poll, work, weight);
if (likely(work < weight))
return work;
@@@ -8550,8 -8567,7 +8574,7 @@@ static int __dev_set_promiscuity(struc
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
- pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -8621,8 -8637,7 +8644,7 @@@ static int __dev_set_allmulti(struct ne
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
- pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -9159,14 -9174,11 +9181,11 @@@ int dev_get_port_parent_id(struct net_d
}
err = devlink_compat_switch_id_get(dev, ppid);
- if (!err || err != -EOPNOTSUPP)
+ if (!recurse || err != -EOPNOTSUPP)
return err;
- if (!recurse)
- return -EOPNOTSUPP;
-
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ err = dev_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
@@@ -10867,7 -10879,7 +10886,7 @@@ struct net_device *alloc_netdev_mqs(in
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev);
+ nf_hook_netdev_init(dev);
return dev;
@@@ -11153,7 -11165,7 +11172,7 @@@ int __dev_change_net_namespace(struct n
* we can use it in the destination network namespace.
*/
err = -EEXIST;
- if (__dev_get_by_name(net, dev->name)) {
+ if (netdev_name_in_use(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
@@@ -11506,7 -11518,7 +11525,7 @@@ static void __net_exit default_device_e
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
- if (__dev_get_by_name(&init_net, fb_name))
+ if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
diff --combined net/core/net-sysfs.c
index b2e49eb7001d,d6e4e0b43beb..9c01c642cf9e
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@@ -175,6 -175,14 +175,14 @@@ static int change_carrier(struct net_de
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_carrier; this helps returning early
+ * without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_carrier)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_carrier);
}
@@@ -196,6 -204,12 +204,12 @@@ static ssize_t speed_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -216,6 -230,12 +230,12 @@@ static ssize_t duplex_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -468,6 -488,14 +488,14 @@@ static ssize_t proto_down_store(struct
struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_proto_down; this helps returning
+ * early without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@@ -478,6 -506,12 +506,12 @@@ static ssize_t phys_port_id_show(struc
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this helps returning
+ * early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -500,6 -534,13 +534,13 @@@ static ssize_t phys_port_name_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -522,6 -563,14 +563,14 @@@ static ssize_t phys_switch_id_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below. This works
+ * because recurse is false when calling dev_get_port_parent_id.
+ */
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -1226,6 -1275,12 +1275,12 @@@ static ssize_t tx_maxrate_store(struct
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ /* The check is also done later; this helps returning early without
+ * hitting the trylock/restart below.
+ */
+ if (!dev->netdev_ops->ndo_set_tx_maxrate)
+ return -EOPNOTSUPP;
+
err = kstrtou32(buf, 10, &rate);
if (err < 0)
return err;
@@@ -1869,7 -1924,7 +1924,7 @@@ static struct class net_class __ro_afte
.get_ownership = net_get_ownership,
};
- #ifdef CONFIG_OF_NET
+ #ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
for (; dev; dev = dev->parent) {
@@@ -1973,9 -2028,9 +2028,9 @@@ int netdev_register_kobject(struct net_
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
const struct net *net_new)
{
+ kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
+ kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
struct device *dev = &ndev->dev;
- kuid_t old_uid, new_uid;
- kgid_t old_gid, new_gid;
int error;
net_ns_get_ownership(net_old, &old_uid, &old_gid);
diff --combined net/core/skbuff.c
index fe9358437380,74601bbc56ac..09b8cf8ab234
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -80,7 -80,6 +80,7 @@@
#include <linux/indirect_call_wrapper.h>
#include "datagram.h"
+#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@@ -135,34 -134,31 +135,31 @@@ struct napi_alloc_cache
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
- static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
+ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
- }
-
- void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
- {
fragsz = SKB_DATA_ALIGN(fragsz);
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
- struct page_frag_cache *nc;
void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
- nc = this_cpu_ptr(&netdev_alloc_cache);
+ struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
} else {
+ struct napi_alloc_cache *nc;
+
local_bh_disable();
- data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ nc = this_cpu_ptr(&napi_alloc_cache);
+ data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
local_bh_enable();
}
return data;
@@@ -398,8 -394,9 +395,9 @@@ struct sk_buff *__alloc_skb(unsigned in
{
struct kmem_cache *cache;
struct sk_buff *skb;
- u8 *data;
+ unsigned int osize;
bool pfmemalloc;
+ u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
@@@ -431,7 -428,8 +429,8 @@@
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
- size = SKB_WITH_OVERHEAD(ksize(data));
+ osize = ksize(data);
+ size = SKB_WITH_OVERHEAD(osize);
prefetchw(data + size);
/*
@@@ -440,7 -438,7 +439,7 @@@
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- __build_skb_around(skb, data, 0);
+ __build_skb_around(skb, data, osize);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) {
@@@ -1805,39 -1803,30 +1804,39 @@@ EXPORT_SYMBOL(skb_realloc_headroom)
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
int delta = headroom - skb_headroom(skb);
+ int osize = skb_end_offset(skb);
+ struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0,
"%s is expecting an increase in the headroom", __func__))
return skb;
- /* pskb_expand_head() might crash, if skb is shared */
- if (skb_shared(skb)) {
+ delta = SKB_DATA_ALIGN(delta);
+ /* pskb_expand_head() might crash, if skb is shared. */
+ if (skb_shared(skb) || !is_skb_wmem(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb)) {
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
- consume_skb(skb);
- } else {
- kfree_skb(skb);
- }
+ if (unlikely(!nskb))
+ goto fail;
+
+ if (sk)
+ skb_set_owner_w(nskb, sk);
+ consume_skb(skb);
skb = nskb;
}
- if (skb &&
- pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(skb);
- skb = NULL;
+ if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
+ goto fail;
+
+ if (sk && is_skb_wmem(skb)) {
+ delta = skb_end_offset(skb) - osize;
+ refcount_add(delta, &sk->sk_wmem_alloc);
+ skb->truesize += delta;
}
return skb;
+
+fail:
+ kfree_skb(skb);
+ return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
diff --combined net/ipv4/tcp.c
index f5c336f8b0c8,d0b848ff5c0f..c262cb3e4f35
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@@ -287,8 -287,8 +287,8 @@@ enum
TCP_CMSG_TS = 2
};
- struct percpu_counter tcp_orphan_count;
- EXPORT_SYMBOL_GPL(tcp_orphan_count);
+ DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
+ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
@@@ -325,11 -325,6 +325,6 @@@ struct tcp_splice_state
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
- DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
- EXPORT_SYMBOL(tcp_rx_skb_cache_key);
-
- DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-
void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
@@@ -486,7 -481,10 +481,7 @@@ static bool tcp_stream_is_readable(stru
{
if (tcp_epollin_ready(sk, target))
return true;
-
- if (sk->sk_prot->stream_memory_read)
- return sk->sk_prot->stream_memory_read(sk);
- return false;
+ return sk_is_readable(sk);
}
/*
@@@ -644,7 -642,7 +639,7 @@@ int tcp_ioctl(struct sock *sk, int cmd
}
EXPORT_SYMBOL(tcp_ioctl);
- static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
+ void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
@@@ -655,7 -653,7 +650,7 @@@ static inline bool forced_push(const st
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
- static void skb_entail(struct sock *sk, struct sk_buff *skb)
+ void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@@ -858,30 -856,15 +853,15 @@@ ssize_t tcp_splice_read(struct socket *
}
EXPORT_SYMBOL(tcp_splice_read);
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule)
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule)
{
struct sk_buff *skb;
- if (likely(!size)) {
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- sk->sk_tx_skb_cache = NULL;
- pskb_trim(skb, 0);
- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
- skb_shinfo(skb)->tx_flags = 0;
- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
- return skb;
- }
- }
- /* The TCP header must be at least 32-bit aligned. */
- size = ALIGN(size, 4);
-
if (unlikely(tcp_under_memory_pressure(sk)))
sk_mem_reclaim_partial(sk);
- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+ skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
if (likely(skb)) {
bool mem_scheduled;
@@@ -892,7 -875,7 +872,7 @@@
mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
}
if (likely(mem_scheduled)) {
- skb_reserve(skb, sk->sk_prot->max_header);
+ skb_reserve(skb, MAX_TCP_HEADER);
/*
* Make sure that we have exactly size bytes
* available to the caller, no more, no less.
@@@ -952,7 -935,7 +932,7 @@@ int tcp_send_mss(struct sock *sk, int *
*/
void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
{
- if (skb && !skb->len) {
+ if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
tcp_unlink_write_queue(skb, sk);
if (tcp_write_queue_empty(sk))
tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
@@@ -960,8 -943,8 +940,8 @@@
}
}
- struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size)
+ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ struct page *page, int offset, size_t *size)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@@ -974,15 -957,15 +954,15 @@@ new_segment
if (!sk_stream_memory_free(sk))
return NULL;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- tcp_rtx_and_write_queues_empty(sk));
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ tcp_rtx_and_write_queues_empty(sk));
if (!skb)
return NULL;
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
}
@@@ -1303,15 -1286,15 +1283,15 @@@ new_segment
goto restart;
}
first_skb = tcp_rtx_and_write_queues_empty(sk);
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- first_skb);
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ first_skb);
if (!skb)
goto wait_for_space;
process_backlog++;
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
/* All packets are restored as if they have
@@@ -2687,11 -2670,36 +2667,36 @@@ void tcp_shutdown(struct sock *sk, int
}
EXPORT_SYMBOL(tcp_shutdown);
+ int tcp_orphan_count_sum(void)
+ {
+ int i, total = 0;
+
+ for_each_possible_cpu(i)
+ total += per_cpu(tcp_orphan_count, i);
+
+ return max(total, 0);
+ }
+
+ static int tcp_orphan_cache;
+ static struct timer_list tcp_orphan_timer;
+ #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
+
+ static void tcp_orphan_update(struct timer_list *unused)
+ {
+ WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+ }
+
+ static bool tcp_too_many_orphans(int shift)
+ {
+ return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
+ }
+
bool tcp_check_oom(struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift);
+ too_many_orphans = tcp_too_many_orphans(shift);
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans)
@@@ -2800,7 -2808,7 +2805,7 @@@ adjudge_to_death
/* remove backlog if any, without releasing ownership. */
__release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(tcp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@@ -2917,11 -2925,6 +2922,6 @@@ void tcp_write_queue_purge(struct sock
sk_wmem_free_skb(sk, skb);
}
tcp_rtx_queue_purge(sk);
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- __kfree_skb(skb);
- sk->sk_tx_skb_cache = NULL;
- }
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
@@@ -2958,10 -2961,6 +2958,6 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- if (sk->sk_rx_skb_cache) {
- __kfree_skb(sk->sk_rx_skb_cache);
- sk->sk_rx_skb_cache = NULL;
- }
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
@@@ -4502,7 -4501,10 +4498,10 @@@ void __init tcp_init(void
sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
- percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
+
+ timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+
inet_hashinfo_init(&tcp_hashinfo);
inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
thash_entries, 21, /* one slot per 2 MB*/
diff --combined net/mac80211/mesh.c
index 5dcfd53a4ab6,a4212a333d61..15ac08d111ea
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@@ -672,7 -672,7 +672,7 @@@ ieee80211_mesh_update_bss_params(struc
u8 *ie, u8 ie_len)
{
struct ieee80211_supported_band *sband;
- const u8 *cap;
+ const struct element *cap;
const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata);
@@@ -687,10 -687,9 +687,10 @@@
sdata->vif.bss_conf.he_support = true;
- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
- if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
- he_oper = (void *)(cap + 3);
+ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+ if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
+ cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
+ he_oper = (void *)(cap->data + 1);
if (he_oper)
sdata->vif.bss_conf.he_oper.params =
@@@ -1247,7 -1246,7 +1247,7 @@@ ieee80211_mesh_rx_probe_req(struct ieee
struct sk_buff *presp;
struct beacon_data *bcn;
struct ieee80211_mgmt *hdr;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
size_t baselen;
u8 *pos;
@@@ -1256,22 -1255,24 +1256,24 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid,
- NULL);
-
- if (!elems.mesh_id)
+ elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid,
+ NULL);
+ if (!elems)
return;
+ if (!elems->mesh_id)
+ goto free;
+
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
- elems.ssid_len != 0)
- return;
+ elems->ssid_len != 0)
+ goto free;
- if (elems.mesh_id_len != 0 &&
- (elems.mesh_id_len != ifmsh->mesh_id_len ||
- memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
- return;
+ if (elems->mesh_id_len != 0 &&
+ (elems->mesh_id_len != ifmsh->mesh_id_len ||
+ memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
+ goto free;
rcu_read_lock();
bcn = rcu_dereference(ifmsh->beacon);
@@@ -1295,6 -1296,8 +1297,8 @@@
ieee80211_tx_skb(sdata, presp);
out:
rcu_read_unlock();
+ free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@@ -1305,7 -1308,7 +1309,7 @@@
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
@@@ -1320,42 -1323,47 +1324,47 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems, mgmt->bssid, NULL);
+ elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable,
+ len - baselen,
+ false, mgmt->bssid, NULL);
+ if (!elems)
+ return;
/* ignore non-mesh or secure / unsecure mismatch */
- if ((!elems.mesh_id || !elems.mesh_config) ||
- (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
- (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
- return;
+ if ((!elems->mesh_id || !elems->mesh_config) ||
+ (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
+ (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
+ goto free;
- if (elems.ds_params)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
+ if (elems->ds_params)
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0], band);
else
freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return;
+ goto free;
- if (mesh_matches_local(sdata, &elems)) {
+ if (mesh_matches_local(sdata, elems)) {
mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
if (!sdata->u.mesh.user_mpm ||
sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
- mesh_neighbour_update(sdata, mgmt->sa, &elems,
+ mesh_neighbour_update(sdata, mgmt->sa, elems,
rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
!sdata->vif.csa_active)
- ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+ ieee80211_mesh_process_chnswitch(sdata, elems, true);
}
if (ifmsh->sync_ops)
- ifmsh->sync_ops->rx_bcn_presp(sdata,
- stype, mgmt, &elems, rx_status);
+ ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len,
+ elems->mesh_config, rx_status);
+ free:
+ kfree(elems);
}
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
@@@ -1447,7 -1455,7 +1456,7 @@@ static void mesh_rx_csa_frame(struct ie
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
u16 pre_value;
bool fwd_csa = true;
size_t baselen;
@@@ -1460,33 -1468,37 +1469,37 @@@
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
- ieee802_11_parse_elems(pos, len - baselen, true, &elems,
- mgmt->bssid, NULL);
-
- if (!mesh_matches_local(sdata, &elems))
+ elems = ieee802_11_parse_elems(pos, len - baselen, true,
+ mgmt->bssid, NULL);
+ if (!elems)
return;
- ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ if (!mesh_matches_local(sdata, elems))
+ goto free;
+
+ ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
fwd_csa = false;
- pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
+ pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value);
if (ifmsh->pre_value >= pre_value)
- return;
+ goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.csa_active &&
- !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+ !ieee80211_mesh_process_chnswitch(sdata, elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
- return;
+ goto free;
}
/* forward or re-broadcast the CSA frame */
if (fwd_csa) {
- if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
+ if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0)
mcsa_dbg(sdata, "Failed to forward the CSA frame");
}
+ free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
diff --combined net/netfilter/ipvs/ip_vs_ctl.c
index 0ff94c66641f,e62b40bd349e..38ed88b89007
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@@ -48,8 -48,6 +48,8 @@@
#include <net/ip_vs.h>
+MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME);
+
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
static DEFINE_MUTEX(__ip_vs_mutex);
@@@ -2019,6 -2017,12 +2019,12 @@@ static struct ctl_table vs_vars[] =
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "run_estimation",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
@@@ -4092,6 -4096,8 +4098,8 @@@ static int __net_init ip_vs_control_net
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+ ipvs->sysctl_run_estimation = 1;
+ tbl[idx++].data = &ipvs->sysctl_run_estimation;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
diff --combined net/tls/tls_main.c
index 9ab81db8a654,278192ee133e..acfba9f1ba72
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@@ -421,6 -421,88 +421,88 @@@ static int do_tls_getsockopt_conf(struc
rc = -EFAULT;
break;
}
+ case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_ccm_128, info);
+
+ if (len != sizeof(*aes_ccm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(aes_ccm_128->iv,
+ cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_chacha20_poly1305,
+ info);
+
+ if (len != sizeof(*chacha20_poly1305)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(chacha20_poly1305->iv,
+ cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, chacha20_poly1305,
+ sizeof(*chacha20_poly1305)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_gcm, info);
+
+ if (len != sizeof(*sm4_gcm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_gcm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ TLS_CIPHER_SM4_GCM_IV_SIZE);
+ memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_ccm, info);
+
+ if (len != sizeof(*sm4_ccm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_ccm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ TLS_CIPHER_SM4_CCM_IV_SIZE);
+ memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ rc = -EFAULT;
+ break;
+ }
default:
rc = -EINVAL;
}
@@@ -524,6 -606,12 +606,12 @@@ static int do_tls_setsockopt_conf(struc
case TLS_CIPHER_CHACHA20_POLY1305:
optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
break;
+ case TLS_CIPHER_SM4_GCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+ break;
default:
rc = -EINVAL;
goto err_crypto_info;
@@@ -681,12 -769,12 +769,12 @@@ static void build_protos(struct proto p
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE
diff --combined net/tls/tls_sw.c
index d5d09bd817b7,4147bb2e7057..c5ba3fd50e91
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -498,9 -498,15 +498,15 @@@ static int tls_do_encryption(struct soc
int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
}
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
@@@ -1457,10 -1463,16 +1463,16 @@@ static int decrypt_internal(struct soc
aad = (u8 *)(sgout + n_sgout);
iv = aad + prot->aad_size;
- /* For CCM based ciphers, first byte of nonce+iv is always '2' */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
- iv[0] = 2;
+ /* For CCM based ciphers, first byte of nonce+iv is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
}
/* Prepare IV */
@@@ -2026,7 -2038,7 +2038,7 @@@ splice_read_end
return copied ? : err;
}
-bool tls_sw_stream_read(const struct sock *sk)
+bool tls_sw_sock_is_readable(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -2424,6 -2436,40 +2436,40 @@@ int tls_set_sw_offload(struct sock *sk
cipher_name = "rfc7539(chacha20,poly1305)";
break;
}
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+ sm4_gcm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ iv = sm4_gcm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+ rec_seq = sm4_gcm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+ key = sm4_gcm_info->key;
+ salt = sm4_gcm_info->salt;
+ salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+ cipher_name = "gcm(sm4)";
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+ sm4_ccm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ iv = sm4_ccm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+ rec_seq = sm4_ccm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+ key = sm4_ccm_info->key;
+ salt = sm4_ccm_info->salt;
+ salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+ cipher_name = "ccm(sm4)";
+ break;
+ }
default:
rc = -EINVAL;
goto free_priv;
diff --combined net/wireless/core.c
index aaba847d79eb,45be124a98f1..eb297e1015e0
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@@ -524,7 -524,6 +524,7 @@@ use_default_name
INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
INIT_WORK(&rdev->mgmt_registrations_update_wk,
cfg80211_mgmt_registrations_update_wk);
+ spin_lock_init(&rdev->mgmt_registrations_lock);
#ifdef CONFIG_CFG80211_DEFAULT_PS
rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@@ -1081,6 -1080,16 +1081,16 @@@ void cfg80211_dev_free(struct cfg80211_
list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&rdev->wiphy, &scan->pub);
mutex_destroy(&rdev->wiphy.mtx);
+
+ /*
+ * The 'regd' can only be non-NULL if we never finished
+ * initializing the wiphy and thus never went through the
+ * unregister path - e.g. in failure scenarios. Thus, it
+ * cannot have been visible to anyone if non-NULL, so we
+ * can just free it here.
+ */
+ kfree(rcu_dereference_raw(rdev->wiphy.regd));
+
kfree(rdev);
}
@@@ -1280,6 -1289,7 +1290,6 @@@ void cfg80211_init_wdev(struct wireless
INIT_LIST_HEAD(&wdev->event_list);
spin_lock_init(&wdev->event_lock);
INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
INIT_LIST_HEAD(&wdev->pmsr_list);
spin_lock_init(&wdev->pmsr_lock);
INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
diff --combined net/wireless/scan.c
index adc0d14cfd86,e4f79b23f7f6..22e92be61938
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@@ -383,7 -383,7 +383,7 @@@ static bool is_bss(struct cfg80211_bss
const u8 *ssid, size_t ssid_len)
{
const struct cfg80211_bss_ies *ies;
- const u8 *ssidie;
+ const struct element *ssid_elem;
if (bssid && !ether_addr_equal(a->bssid, bssid))
return false;
@@@ -394,12 -394,12 +394,12 @@@
ies = rcu_access_pointer(a->ies);
if (!ies)
return false;
- ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
- if (!ssidie)
+ ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
+ if (!ssid_elem)
return false;
- if (ssidie[1] != ssid_len)
+ if (ssid_elem->datalen != ssid_len)
return false;
- return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+ return memcmp(ssid_elem->data, ssid, ssid_len) == 0;
}
static int
@@@ -418,17 -418,14 +418,17 @@@ cfg80211_add_nontrans_list(struct cfg80
}
ssid_len = ssid[1];
ssid = ssid + 2;
- rcu_read_unlock();
/* check if nontrans_bss is in the list */
list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
- if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+ if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+ rcu_read_unlock();
return 0;
+ }
}
+ rcu_read_unlock();
+
/* add to the list */
list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
return 0;
@@@ -1794,25 -1791,13 +1794,13 @@@ cfg80211_bss_update(struct cfg80211_reg
return NULL;
}
- /*
- * Update RX channel information based on the available frame payload
- * information. This is mainly for the 2.4 GHz band where frames can be received
- * from neighboring channels and the Beacon frames use the DSSS Parameter Set
- * element to indicate the current (transmitting) channel, but this might also
- * be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
- */
- static struct ieee80211_channel *
- cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
- struct ieee80211_channel *channel,
- enum nl80211_bss_scan_width scan_width)
+ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band)
{
const u8 *tmp;
- u32 freq;
int channel_number = -1;
- struct ieee80211_channel *alt_channel;
- if (channel->band == NL80211_BAND_S1GHZ) {
+ if (band == NL80211_BAND_S1GHZ) {
tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
@@@ -1833,6 -1818,29 +1821,29 @@@
}
}
+ return channel_number;
+ }
+ EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
+
+ /*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
+ static struct ieee80211_channel *
+ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width)
+ {
+ u32 freq;
+ int channel_number;
+ struct ieee80211_channel *alt_channel;
+
+ channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+
if (channel_number < 0) {
/* No channel information in frame payload */
return channel;
@@@ -2075,12 -2083,12 +2086,12 @@@ static void cfg80211_parse_mbssid_data(
if (!non_tx_data)
return;
- if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return;
if (!wiphy->support_mbssid)
return;
if (wiphy->support_only_he_mbssid &&
- !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
return;
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
@@@ -2447,10 -2455,10 +2458,10 @@@ cfg80211_inform_bss_frame_data(struct w
res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
len, gfp);
if (!res || !wiphy->support_mbssid ||
- !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
return res;
if (wiphy->support_only_he_mbssid &&
- !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
return res;
non_tx_data.tx_bss = res;
diff --combined net/wireless/util.c
index a1a99a574984,2991f711491a..5ff1f8726faf
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -80,6 -80,7 +80,7 @@@ u32 ieee80211_channel_to_freq_khz(int c
return 0; /* not supported */
switch (band) {
case NL80211_BAND_2GHZ:
+ case NL80211_BAND_LC:
if (chan == 14)
return MHZ_TO_KHZ(2484);
else if (chan < 14)
@@@ -209,6 -210,7 +210,7 @@@ static void set_mandatory_flags_band(st
WARN_ON(want);
break;
case NL80211_BAND_2GHZ:
+ case NL80211_BAND_LC:
want = 7;
for (i = 0; i < sband->n_bitrates; i++) {
switch (sband->bitrates[i].bitrate) {
@@@ -1028,14 -1030,14 +1030,14 @@@ int cfg80211_change_iface(struct cfg802
!(rdev->wiphy.interface_modes & (1 << ntype)))
return -EOPNOTSUPP;
- /* if it's part of a bridge, reject changing type to station/ibss */
- if (netif_is_bridge_port(dev) &&
- (ntype == NL80211_IFTYPE_ADHOC ||
- ntype == NL80211_IFTYPE_STATION ||
- ntype == NL80211_IFTYPE_P2P_CLIENT))
- return -EBUSY;
-
if (ntype != otype) {
+ /* if it's part of a bridge, reject changing type to station/ibss */
+ if (netif_is_bridge_port(dev) &&
+ (ntype == NL80211_IFTYPE_ADHOC ||
+ ntype == NL80211_IFTYPE_STATION ||
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
+ return -EBUSY;
+
dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
wdev_lock(dev->ieee80211_ptr);
--
LinuxNextTracking
Build check errors found: 2021-10-27
by postmaster@open-mesh.org
Name of failed tests
====================
master
------
* difference between net-next and batadv master
Output of different failed tests
================================
master: difference between net-next and batadv master
-----------------------------------------------------
netnext/net/batman-adv/bridge_loop_avoidance.c | 8 --
netnext/net/batman-adv/main.c | 56 ++++------------
netnext/net/batman-adv/network-coding.c | 4 -
netnext/net/batman-adv/translation-table.c | 4 -
4 files changed, 20 insertions(+), 52 deletions(-)
Statistics
==========
maint
-----
Failed tests: 0
Started build tests: 306
Tested Linux versions: 41
Tested configs: 114
master
------
Failed tests: 1
Started build tests: 274
Tested Linux versions: 41
Tested configs: 114
[linux-next] LinuxNextTracking branch, master, updated. next-20211021
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 47ce5f1e3e4e87f141310c975497a4de87f49ab9
Author: Jakub Kicinski <kuba(a)kernel.org>
Date: Tue Oct 19 09:30:07 2021 -0700
batman-adv: prepare for const netdev->dev_addr
netdev->dev_addr will be constant soon; make sure
the const qualifier is propagated through batman-adv.
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Acked-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
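[Editor's note: the pattern the diff below applies is easiest to see in
isolation. Here is a minimal, self-contained C sketch of why the const
qualifier has to be propagated through every helper in the call chain --
once the source pointer is const, any callee taking a plain u8 * no longer
compiles. fake_netdev and use_addr are made-up illustrative names, not
batman-adv or kernel code.]

#include <stdio.h>

struct fake_netdev {
	const unsigned char *dev_addr;	/* constant after the change */
};

/* before: static void use_addr(unsigned char *addr) -- would no longer
 * compile when handed dev->dev_addr; the const must flow through */
static void use_addr(const unsigned char *addr)
{
	printf("%02x:%02x\n", addr[0], addr[1]);
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0xba, 0xad, 0xf0, 0x0d, 0x00 };
	struct fake_netdev dev = { .dev_addr = mac };

	use_addr(dev.dev_addr);		/* fine: const propagated */
	return 0;
}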
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 1669744304c5..7242b32fff80 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -254,7 +254,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
-batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@ -336,7 +336,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
-static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
@@ -488,7 +488,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
-batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@ -926,7 +926,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@ -964,7 +964,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@ -2126,7 +2126,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
@@ -2294,7 +2294,7 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 970d0d7ccc98..83f31494ea4d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -747,7 +747,8 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node *orig_node = NULL;
struct batadv_hard_iface *primary_if = NULL;
bool ret = false;
- u8 *orig_addr, orig_ttvn;
+ const u8 *orig_addr;
+ u8 orig_ttvn;
if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
primary_if = batadv_primary_if_get_selected(bat_priv);
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 56b9fe97b3b4..fbcb15c7c29b 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -631,9 +631,9 @@ static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node = NULL;
const struct batadv_icmp_tp_packet *icmp;
struct batadv_tp_vars *tp_vars;
+ const unsigned char *dev_addr;
size_t packet_len, mss;
u32 rtt, recv_ack, cwnd;
- unsigned char *dev_addr;
packet_len = BATADV_TP_PLEN;
mss = BATADV_TP_PLEN;
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 992773376e51..0cb58eb04093 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -587,8 +587,8 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*/
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
- u8 *dst, u8 type, u8 version,
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
+ const u8 *dst, u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index 54f2a35653d0..4cf8af00fc11 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -42,8 +42,8 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
u8 *src, u8 *dst,
void *tvlv_buff, u16 tvlv_buff_len);
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
- u8 *dst, u8 type, u8 version,
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
+ const u8 *dst, u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len);
#endif /* _NET_BATMAN_ADV_TVLV_H_ */
--
LinuxNextTracking
[linux-next] LinuxNextTracking branch, master, updated. next-20211021
by batman@open-mesh.org
The following commit has been merged in the master branch:
commit 0f00e70ef645e43527a1b622721ae084149c21ca
Author: Jakub Kicinski <kuba(a)kernel.org>
Date: Tue Oct 19 09:39:27 2021 -0700
batman-adv: use eth_hw_addr_set() instead of ether_addr_copy()
Commit 406f42fa0d3c ("net-next: When a bond have a massive amount
of VLANs...") introduced an rbtree for faster Ethernet address
lookup. To maintain netdev->dev_addr in this tree, all writes to it
need to go through the appropriate helpers.
Convert batman-adv from ether_addr_copy() to eth_hw_addr_set():
@@
expression dev, np;
@@
- ether_addr_copy(dev->dev_addr, np)
+ eth_hw_addr_set(dev, np)
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Acked-by: Sven Eckelmann <sven(a)narfation.org>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
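[Editor's note: the reason writes must funnel through eth_hw_addr_set() is
that a side lookup structure (the rbtree mentioned above) has to be updated
on every address change, which only a single central setter can guarantee.
A hedged, self-contained C sketch of the idea -- fake_netdev,
fake_eth_hw_addr_set and the lookup_updates counter are stand-ins for the
kernel API, not real kernel code.]

#include <stdio.h>
#include <string.h>

struct fake_netdev {
	unsigned char dev_addr[6];
};

static int lookup_updates;	/* stand-in for rbtree maintenance */

/* the only place allowed to write dev_addr, so the shadow lookup
 * structure can never go stale */
static void fake_eth_hw_addr_set(struct fake_netdev *dev,
				 const unsigned char *addr)
{
	memcpy(dev->dev_addr, addr, sizeof(dev->dev_addr));
	lookup_updates++;	/* keep the lookup structure in sync */
}

int main(void)
{
	struct fake_netdev dev;
	const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	fake_eth_hw_addr_set(&dev, mac);
	printf("address set, lookup structure updated %d time(s)\n",
	       lookup_updates);
	return 0;
}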
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0604b0279573..7ee09337fc40 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -134,7 +134,7 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
ether_addr_copy(old_addr, dev->dev_addr);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
/* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
--
LinuxNextTracking
Build check errors found: 2021-10-21
by postmaster@open-mesh.org
Name of failed tests
====================
master
------
* sparse linux-4.10 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.10 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.10 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.10 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-4.11 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.11 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.11 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.12 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.12 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.13 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-4.13 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-4.13 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.13 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.13 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.14 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.14 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-4.14 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.14 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.14 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.14 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.14 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.14 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.14 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.14 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.14 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.14.246 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.14.246 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.15 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-4.15 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.15 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.15 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.15 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.15 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.15 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.15 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-4.15 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.16 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.16 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.16 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.16 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.16 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.17 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.17 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.17 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.17 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.18 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.18 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.18 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.18 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.18 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.18 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.18 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.19 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-4.19 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.19 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.19 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.19 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.19 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.19 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.19 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.19.206 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.19.206 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.19.206 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.19.206 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.19.206 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.20 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.20 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-4.20 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.20 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.20 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.20 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.20 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.20 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.20 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.20 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.20 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.20 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.20 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.4 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.4 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-4.4 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.4 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.4 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-4.4 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.4.283 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.4.283 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.4.283 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.4.283 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.4.283 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.5 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.6 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-4.6 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.6 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.6 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.6 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.6 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.7 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.7 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.7 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.7 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.8 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-4.8 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.8 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-4.8 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.8 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-4.9 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-4.9 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-4.9 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.9 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.9 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-4.9 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-4.9 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-4.9.282 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-4.9.282 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-4.9.282 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.0 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-5.0 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.0 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-5.0 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.0 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.0 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-5.0 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-5.1 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.1 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.10 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.10 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.10 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.10 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.10 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.10.64 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.11 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-5.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-5.11 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-5.11 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-5.11 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-5.11 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.12 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-5.12 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.12 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-5.12 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.12 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-5.12 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.12 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.12 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.12 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-5.12 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.13 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-5.13 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-5.13 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.13 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.13 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.13 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.13.16 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.13.16 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.13.16 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-5.13.16 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.13.16 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.13.16 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-5.13.16 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-5.14 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-5.14 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.14 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.14.3 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.14.3 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* sparse linux-5.14.3 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.14.3 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.2 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.2 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.2 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-5.3 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.3 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.3 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* sparse linux-5.3 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.4 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* sparse linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-5.4.145 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-5.4.145 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* sparse linux-5.4.145 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-5.4.145 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.4.145 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-5.4.145 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.4.145 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.4.145 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.4.145 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.5 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* sparse linux-5.5 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.5 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-5.5 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.5 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-5.5 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* sparse linux-5.5 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-5.6 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.6 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* sparse linux-5.6 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.6 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* sparse linux-5.6 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* sparse linux-5.6 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-5.7 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.7 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* sparse linux-5.7 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* sparse linux-5.8 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.8 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-5.8 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.9 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.9 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* sparse linux-5.9 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* sparse linux-5.9 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* sparse linux-5.9 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* sparse linux-5.9 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* sparse linux-5.9 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.10 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.10 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.10 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.10 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.11 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.11 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.11 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.12 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.12 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.13 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.13 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.13 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.13 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.13 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.14 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.14 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.14 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.14 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.14 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.14 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.14 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.14 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.14 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.14 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.14 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.14.246 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.15 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.15 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.15 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.15 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.15 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.15 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.15 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.15 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.15 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.16 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.16 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.16 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.16 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.16 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.17 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.17 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.17 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.17 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.18 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.18 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.18 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.18 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.18 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.18 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.18 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.19 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.19 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.19 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.19 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.19 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.19 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.19 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.19 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.19.206 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.19.206 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.19.206 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.19.206 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.19.206 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.20 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.20 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.20 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.20 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.20 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.20 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.20 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.4 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.4 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.4 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.4 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.4 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.4 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.4.283 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.4.283 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.4.283 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.4.283 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.4.283 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.5 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.6 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.6 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.6 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.6 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.6 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.6 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.7 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.7 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.7 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.7 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.8 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.8 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.8 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-4.8 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.8 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-4.9 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.9 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.9 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.9 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.9 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-4.9 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.9 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.9.282 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-4.9.282 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-4.9.282 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.0 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.0 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.0 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.0 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.0 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.0 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.0 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.1 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.1 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.10 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.10 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.10 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.10 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.10 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.10.64 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.11 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.11 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.11 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.11 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.11 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.12 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.12 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.12 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.13 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.13 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.13 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.13 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.13 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.13 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.13.16 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.13.16 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.13.16 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.13.16 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.13.16 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.13.16 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.13.16 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.14 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.14 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.14 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.14.3 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.14.3 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.14.3 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.14.3 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.2 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.2 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.2 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.3 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.3 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.3 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.3 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.4 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.4.145 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.4.145 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.4.145 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.4.145 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.4.145 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.4.145 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.4.145 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.4.145 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.4.145 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.5 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
* unused_symbols linux-5.5 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.5 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.5 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.5 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.5 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.5 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.6 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.6 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.6 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.6 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
* unused_symbols linux-5.6 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.6 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.7 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.7 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.7 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=n
* unused_symbols linux-5.8 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.8 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.8 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.9 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.9 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
* unused_symbols linux-5.9 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=y
* unused_symbols linux-5.9 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
* unused_symbols linux-5.9 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
* unused_symbols linux-5.9 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
* unused_symbols linux-5.9 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
Output of different failed tests
================================
master: sparse linux-4.10 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:295: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:553: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1490: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
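
This failure, and every one that follows, has the same root cause: the patched
soft-interface.c calls eth_hw_addr_set(), a helper that does not exist in any
kernel of this test matrix (gcc's "did you mean" hint merely picks the closest
identifier available in each tree, hence eth_hw_addr_inherit on some versions
and eth_addr_dec on others). A minimal compat sketch for the out-of-tree build
is below; the compat-include override path and the 5.15 version cutoff are
assumptions for illustration, not taken from this report:

/* compat-include/linux/etherdevice.h -- sketch of a backport shim.
 * The path mirrors the usual out-of-tree override layout; adjust the
 * KERNEL_VERSION cutoff to wherever eth_hw_addr_set() first appears
 * in the target tree (assumed 5.15 here).
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include_next <linux/etherdevice.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
/* On these older kernels netdev->dev_addr is still a plain writable
 * array, so a direct copy is behavior-equivalent to the new helper.
 */
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}
#endif

With such a shim on the include path, the unmodified call site
(eth_hw_addr_set(dev, addr->sa_data)) compiles on old and new kernels alike.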
master: sparse linux-4.11 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:295: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:553: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1492: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.13 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:303: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:561: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1512: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.14 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:315: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:573: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1503: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.14.246 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:330: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:588: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1552: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.15 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:317: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:575: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1508: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.16 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:323: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:581: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1556: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.17 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:313: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:559: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1571: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.18 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:318: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:558: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1500: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.19 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:306: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:546: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1517: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.19.206 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:304: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:544: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1546: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.20 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:292: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:516: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1563: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.4 cfg: BLA=n DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:259: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:403: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1386: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:281: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:487: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1453: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:259: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:407: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1393: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.6 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:292: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:440: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1430: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.7 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:290: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:440: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1459: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.8 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:290: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:440: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1473: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.9 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:294: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:544: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1490: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-4.9.282 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_inherit’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_inherit
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:308: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:558: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1544: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.0 cfg: BLA=n DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:277: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:492: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1553: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.1 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:276: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:486: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1571: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.10 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:279: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:496: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1805: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:280: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:497: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1822: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:279: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:496: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1800: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.12 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:271: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:514: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1851: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.13 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:272: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:515: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1847: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.13.16 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:273: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:516: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1862: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.2 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:279: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:489: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1595: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.3 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:281: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:497: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1624: _module_/home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.4 cfg: BLA=n DAT=y DEBUG=n TRACING=y NC=n MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:266: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:509: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1652: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.4.145 cfg: BLA=y DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:262: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:497: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1734: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.5 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:266: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:503: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1693: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.6 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:268: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:505: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1683: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.7 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_addr_dec’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_addr_dec
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:267: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:488: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1729: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.8 cfg: BLA=y DAT=y DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:9: error: undefined identifier 'eth_hw_addr_set'
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:281: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:497: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1756: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
master: sparse linux-5.9 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c: In function ‘batadv_interface_set_mac_addr’:
/home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.c:137:2: error: implicit declaration of function ‘eth_hw_addr_set’; did you mean ‘eth_hw_addr_crc’? [-Werror=implicit-function-declaration]
eth_hw_addr_set(dev, addr->sa_data);
^~~~~~~~~~~~~~~
eth_hw_addr_crc
cc1: some warnings being treated as errors
make[3]: *** [scripts/Makefile.build:283: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv/soft-interface.o] Error 1
make[2]: *** [scripts/Makefile.build:500: /home/build_test/build_env/tmp.xNORWGhz9K/net/batman-adv] Error 2
make[1]: *** [Makefile:1784: /home/build_test/build_env/tmp.xNORWGhz9K] Error 2
make: *** [Makefile:68: all] Error 2
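All of the build failures above share one root cause: eth_hw_addr_set()
was only added to include/linux/etherdevice.h in Linux 5.15, so every
tested kernel older than that rejects the call in
batadv_interface_set_mac_addr() as an implicit declaration (the
"did you mean" suggestions merely vary with whichever similarly named
helper each kernel version happens to have). The usual fix is a compat
shim that provides the helper on older kernels. A minimal sketch,
assuming a pre-5.15 kernel where netdev->dev_addr is still directly
writable; this is an illustration, not necessarily the exact shim
batman-adv ships in its compat headers:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/version.h>

	#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
	/* Fallback for kernels that predate eth_hw_addr_set():
	 * dev_addr is a plain writable buffer there, so copying the
	 * six Ethernet address bytes is equivalent.
	 */
	static inline void eth_hw_addr_set(struct net_device *dev,
					   const u8 *addr)
	{
		ether_addr_copy(dev->dev_addr, addr);
	}
	#endif

Pulled in from a compat header ahead of soft-interface.c, such a shim
lets the same source build on both old and new kernels.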
master: unused_symbols linux-4.10 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_rx
batadv_bla_tx
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.13 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=y MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_rx
batadv_bla_tx
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.17 cfg: BLA=y DAT=n DEBUG=n TRACING=n NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_is_backbone_gw_orig
batadv_bla_rx
batadv_bla_tx
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.19 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.19 cfg: BLA=n DAT=y DEBUG=n TRACING=n NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.20 cfg: BLA=n DAT=y DEBUG=y TRACING=y NC=y MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.4.283 cfg: BLA=y DAT=y DEBUG=n TRACING=n NC=y MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_rx
batadv_bla_tx
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=n MCAST=y BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.5 cfg: BLA=n DAT=n DEBUG=y TRACING=y NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.6 cfg: BLA=y DAT=y DEBUG=y TRACING=y NC=n MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_is_backbone_gw_orig
batadv_bla_rx
batadv_bla_tx
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-4.9.282 cfg: BLA=y DAT=n DEBUG=y TRACING=n NC=n MCAST=n BATMAN_V=y
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_is_backbone_gw_orig
batadv_bla_rx
batadv_bla_tx
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-5.1 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-5.10.64 cfg: BLA=y DAT=y DEBUG=n TRACING=y NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_is_backbone_gw_orig
batadv_bla_rx
batadv_bla_tx
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
master: unused_symbols linux-5.11 cfg: BLA=y DAT=n DEBUG=n TRACING=y NC=n MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_bla_rx
batadv_bla_tx
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-5.12 cfg: BLA=n DAT=n DEBUG=y TRACING=n NC=y MCAST=n BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_send_skb_via_tt_generic
batadv_vlan_ap_isola_get
master: unused_symbols linux-5.6 cfg: BLA=n DAT=y DEBUG=y TRACING=n NC=y MCAST=y BATMAN_V=n
---------------------------------------------------------------------------
batadv_algo_get
batadv_algo_select
batadv_dat_snoop_outgoing_arp_reply
batadv_dat_snoop_outgoing_arp_request
batadv_dat_snoop_outgoing_dhcp_ack
batadv_gw_dhcp_recipient_get
batadv_gw_out_of_range
batadv_hardif_disable_interface
batadv_hardif_enable_interface
batadv_hardif_min_mtu
batadv_mcast_forw_mode
batadv_mcast_forw_send
batadv_mcast_forw_send_orig
batadv_mesh_free
batadv_mesh_init
batadv_nc_init_bat_priv
batadv_netlink_tpmeter_notify
batadv_orig_node_vlan_get
batadv_orig_node_vlan_new
batadv_orig_node_vlan_release
batadv_routing_algo
batadv_send_bcast_packet
batadv_send_skb_via_gw
batadv_vlan_ap_isola_get
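Worth noting: every unused_symbols report above was produced against a
kernel (4.4 through 5.12) whose build already failed on the missing
eth_hw_addr_set(). These listings are therefore plausibly fallout from
soft-interface.o never being built, since many of the listed functions
(batadv_bla_rx/tx, batadv_gw_out_of_range, the
batadv_dat_snoop_outgoing_* and batadv_mcast_forw_* families) have
callers in soft-interface.c, rather than independent problems. An
alternative to the compat header sketched earlier would be to guard
the single call site instead; a hypothetical sketch:

	#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
		eth_hw_addr_set(dev, addr->sa_data);
	#else
		/* Pre-5.15: dev_addr is writable, copy directly. */
		ether_addr_copy(dev->dev_addr, addr->sa_data);
	#endif

The header shim is usually preferred, though, since it keeps the call
site identical to upstream.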
Statistics
==========
master
------
Failed tests: 516
Started build tests: 271
Tested Linux versions: 41
Tested configs: 113
maint
-----
Failed tests: 0
Started build tests: 294
Tested Linux versions: 41
Tested configs: 118