The following commit has been merged in the master branch:

commit 7df621a3eea6761bc83e641aaca6963210c7290d
Merge: f2edaa4ad5d51371709196f2c258fbe875962dee 411a44c24a561e449b592ff631b7ae321f1eb559
Author: Jakub Kicinski <kuba@kernel.org>
Date:   Thu Oct 28 10:43:58 2021 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
include/net/sock.h
  7b50ecfcc6cd ("net: Rename ->stream_memory_read to ->sock_is_readable")
  4c1e34c0dbff ("vsock: Enable y2038 safe timeval for timeout")
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
  0daa55d033b0 ("octeontx2-af: cn10k: debugfs for dumping LMTST map table")
  e77bcdd1f639 ("octeontx2-af: Display all enabled PF VF rsrc_alloc entries.")
Adjacent code addition in both cases, keep both.
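Both conflicts are of the "adjacent addition" kind: each branch appended its own, unrelated entry at the same point in a table, so the resolution is simply to keep both additions side by side. A minimal C sketch of the resolved shape (the struct and entry names here are invented for illustration, not taken from either conflicting file):

/* Hypothetical sketch of an "adjacent addition" resolution: each branch
 * appended one entry at the same spot in a table, and the merge result
 * keeps both entries, one after the other.
 */
struct dbg_entry {
	const char *name;
};

static const struct dbg_entry dbg_entries[] = {
	{ .name = "existing" },	/* present in the common ancestor */
	{ .name = "entry_a" },	/* appended on the net-next side */
	{ .name = "entry_b" },	/* appended on the net side */
};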
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
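The diff below is in git's combined format (git diff --cc), which merge commits use to show the resolution against both parents at once: the index line carries the two parent blob IDs before the result, hunk headers use @@@ with three ranges (first parent, second parent, merged result), and each content line has two marker columns, one per parent. As an illustration only (the function and statement names are invented), a combined hunk reads like this:

@@@ -10,2 -10,2 +10,4 @@@ example_func
  	shared_context_line();
 +	line_added_on_first_parent();
+ 	line_added_on_second_parent();
++	line_written_during_resolution();

Here " +" marks a line present in the first parent but not the second, "+ " the reverse, and "++" a line present in neither parent, i.e. one written while resolving the conflict.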
diff --combined MAINTAINERS index 975086c5345d,c34486524d40..3b85f039fbf9 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -2899,12 -2899,6 +2899,12 @@@ S: Maintaine F: Documentation/hwmon/asc7621.rst F: drivers/hwmon/asc7621.c
+ASIX AX88796C SPI ETHERNET ADAPTER +M: Łukasz Stelmach l.stelmach@samsung.com +S: Maintained +F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml +F: drivers/net/ethernet/asix/ax88796c_* + ASPEED PINCTRL DRIVERS M: Andrew Jeffery andrew@aj.id.au L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers) @@@ -5464,6 -5458,19 +5464,19 @@@ F: include/net/devlink. F: include/uapi/linux/devlink.h F: net/core/devlink.c
+ DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT + M: Christoph Niedermaier cniedermaier@dh-electronics.com + L: kernel@dh-electronics.com + S: Maintained + F: arch/arm/boot/dts/imx6*-dhcom-* + + DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT + M: Marek Vasut marex@denx.de + L: kernel@dh-electronics.com + S: Maintained + F: arch/arm/boot/dts/stm32mp1*-dhcom-* + F: arch/arm/boot/dts/stm32mp1*-dhcor-* + DIALOG SEMICONDUCTOR DRIVERS M: Support Opensource support.opensource@diasemi.com S: Supported @@@ -7018,6 -7025,7 +7031,6 @@@ F: drivers/net/mdio/fwnode_mdio. F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ F: drivers/net/phy/ -F: drivers/of/of_net.c F: include/dt-bindings/net/qca-ar803x.h F: include/linux/*mdio*.h F: include/linux/mdio/*.h @@@ -7029,7 -7037,6 +7042,7 @@@ F: include/linux/platform_data/mdio-gpi F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h +F: net/core/of_net.c
EXFAT FILE SYSTEM M: Namjae Jeon linkinjeon@kernel.org @@@ -11284,7 -11291,6 +11297,6 @@@ F: Documentation/networking/device_driv F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER - M: Vadym Kochan vkochan@marvell.com M: Taras Chornyi tchornyi@marvell.com S: Supported W: https://github.com/Marvell-switching/switchdev-prestera @@@ -11830,9 -11836,7 +11842,9 @@@ F: drivers/mmc/host/mtk-sd. MEDIATEK MT76 WIRELESS LAN DRIVER M: Felix Fietkau nbd@nbd.name M: Lorenzo Bianconi lorenzo.bianconi83@gmail.com -R: Ryder Lee ryder.lee@mediatek.com +M: Ryder Lee ryder.lee@mediatek.com +R: Shayne Chen shayne.chen@mediatek.com +R: Sean Wang sean.wang@mediatek.com L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt76/ @@@ -13056,7 -13060,6 +13068,7 @@@ F: include/linux/dsa F: include/linux/platform_data/dsa.h F: include/net/dsa.h F: net/dsa/ +F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL] M: "David S. Miller" davem@davemloft.net @@@ -15472,7 -15475,6 +15484,7 @@@ M: ath9k-devel@qca.qualcomm.co L: linux-wireless@vger.kernel.org S: Supported W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k +F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER @@@ -15894,12 -15896,6 +15906,12 @@@ L: linux-wireless@vger.kernel.or S: Maintained F: drivers/net/wireless/realtek/rtw88/
+REALTEK WIRELESS DRIVER (rtw89) +M: Ping-Ke Shih pkshih@realtek.com +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/net/wireless/realtek/rtw89/ + REDPINE WIRELESS DRIVER M: Amitkumar Karwar amitkarwar@gmail.com M: Siva Rebbagondla siva8118@gmail.com @@@ -20352,6 -20348,7 +20364,7 @@@ X86 ARCHITECTURE (32-BIT AND 64-BIT M: Thomas Gleixner tglx@linutronix.de M: Ingo Molnar mingo@redhat.com M: Borislav Petkov bp@alien8.de + M: Dave Hansen dave.hansen@linux.intel.com M: x86@kernel.org R: "H. Peter Anvin" hpa@zytor.com L: linux-kernel@vger.kernel.org diff --combined drivers/infiniband/hw/mlx5/mr.c index 14c5564428ab,22e2f4d79743..d2044df30394 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@@ -88,8 -88,9 +88,8 @@@ static void set_mkc_access_pd_addr_fiel MLX5_SET64(mkc, mkc, start_addr, start_addr); }
-static void -assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in) +static void assign_mkey_variant(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in) { u8 key = atomic_inc_return(&dev->mkey_var); void *mkc; @@@ -99,22 -100,17 +99,22 @@@ mkey->key = key; }
-static int -mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in, int inlen) +static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in, int inlen) { + int ret; + assign_mkey_variant(dev, mkey, in); - return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen); + ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); + if (!ret) + init_waitqueue_head(&mkey->wait); + + return ret; }
static int mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev, - struct mlx5_core_mkey *mkey, + struct mlx5_ib_mkey *mkey, struct mlx5_async_ctx *async_ctx, u32 *in, int inlen, u32 *out, int outlen, struct mlx5_async_work *context) @@@ -137,7 -133,7 +137,7 @@@ static int destroy_mkey(struct mlx5_ib_ { WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); }
static void create_mkey_callback(int status, struct mlx5_async_work *context) @@@ -264,11 -260,10 +264,11 @@@ static struct mlx5_ib_mr *create_cache_ goto free_in; }
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); + err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen); if (err) goto free_mr;
+ init_waitqueue_head(&mr->mmkey.wait); mr->mmkey.type = MLX5_MKEY_MR; WRITE_ONCE(ent->dev->cache.last_add, jiffies); spin_lock_irq(&ent->lock); @@@ -295,7 -290,7 +295,7 @@@ static void remove_cache_mr_locked(stru ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key); kfree(mr); spin_lock_irq(&ent->lock); } @@@ -663,7 -658,7 +663,7 @@@ static void clean_keys(struct mlx5_ib_d ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); }
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { @@@ -916,13 -911,12 +916,13 @@@ static struct mlx5_cache_ent *mr_cache_ }
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, - u64 length, int access_flags) + u64 length, int access_flags, u64 iova) { mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->ibmr.length = length; mr->ibmr.device = &dev->ib_dev; + mr->ibmr.iova = iova; mr->access_flags = access_flags; }
@@@ -980,8 -974,11 +980,8 @@@ static struct mlx5_ib_mr *alloc_cacheab
mr->ibmr.pd = pd; mr->umem = umem; - mr->mmkey.iova = iova; - mr->mmkey.size = umem->length; - mr->mmkey.pd = to_mpd(pd)->pdn; mr->page_shift = order_base_2(page_size); - set_mr_fields(dev, mr, umem->length, access_flags); + set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr; } @@@ -1090,8 -1087,8 +1090,8 @@@ static void *mlx5_ib_create_xlt_wr(stru wr->wr.opcode = MLX5_IB_WR_UMR; wr->pd = mr->ibmr.pd; wr->mkey = mr->mmkey.key; - wr->length = mr->mmkey.size; - wr->virt_addr = mr->mmkey.iova; + wr->length = mr->ibmr.length; + wr->virt_addr = mr->ibmr.iova; wr->access_flags = mr->access_flags; wr->page_shift = mr->page_shift; wr->xlt_size = sg->length; @@@ -1342,9 -1339,8 +1342,8 @@@ static struct mlx5_ib_mr *reg_create(st goto err_2; } mr->mmkey.type = MLX5_MKEY_MR; - mr->desc_size = sizeof(struct mlx5_mtt); mr->umem = umem; - set_mr_fields(dev, mr, umem->length, access_flags); + set_mr_fields(dev, mr, umem->length, access_flags, iova); kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); @@@ -1391,7 -1387,7 +1390,7 @@@ static struct ib_mr *mlx5_ib_get_dm_mr(
kfree(in);
- set_mr_fields(dev, mr, length, acc); + set_mr_fields(dev, mr, length, acc, start_addr);
return &mr->ibmr;
@@@ -1536,6 -1532,7 +1535,7 @@@ static struct ib_mr *create_user_odp_mr ib_umem_release(&odp->umem); return ERR_CAST(mr); } + xa_init(&mr->implicit_children);
odp->private = mr; err = mlx5r_store_odp_mkey(dev, &mr->mmkey); @@@ -1712,6 -1709,7 +1712,6 @@@ static int umr_rereg_pd_access(struct m return err;
mr->access_flags = access_flags; - mr->mmkey.pd = to_mpd(pd)->pdn; return 0; }
@@@ -1756,6 -1754,7 +1756,6 @@@ static int umr_rereg_pas(struct mlx5_ib
if (flags & IB_MR_REREG_PD) { mr->ibmr.pd = pd; - mr->mmkey.pd = to_mpd(pd)->pdn; upd_flags |= MLX5_IB_UPD_XLT_PD; } if (flags & IB_MR_REREG_ACCESS) { @@@ -1764,8 -1763,8 +1764,8 @@@ }
mr->ibmr.length = new_umem->length; - mr->mmkey.iova = iova; - mr->mmkey.size = new_umem->length; + mr->ibmr.iova = iova; + mr->ibmr.length = new_umem->length; mr->page_shift = order_base_2(page_size); mr->umem = new_umem; err = mlx5_ib_update_mr_pas(mr, upd_flags); @@@ -1835,7 -1834,7 +1835,7 @@@ struct ib_mr *mlx5_ib_rereg_user_mr(str mr->umem = NULL; atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- return create_real_mr(new_pd, umem, mr->mmkey.iova, + return create_real_mr(new_pd, umem, mr->ibmr.iova, new_access_flags); }
@@@ -2264,9 -2263,9 +2264,9 @@@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw struct mlx5_ib_dev *dev = to_mdev(ibmw->device); int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mw *mw = to_mmw(ibmw); + unsigned int ndescs; u32 *in = NULL; void *mkc; - int ndescs; int err; struct mlx5_ib_alloc_mw req = {}; struct { @@@ -2311,7 -2310,7 +2311,7 @@@
mw->mmkey.type = MLX5_MKEY_MW; ibmw->rkey = mw->mmkey.key; - mw->ndescs = ndescs; + mw->mmkey.ndescs = ndescs;
resp.response_length = min(offsetofend(typeof(resp), response_length), udata->outlen); @@@ -2331,7 -2330,7 +2331,7 @@@ return 0;
free_mkey: - mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); free: kfree(in); return err; @@@ -2350,7 -2349,7 +2350,7 @@@ int mlx5_ib_dealloc_mw(struct ib_mw *mw */ mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
- return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); }
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, @@@ -2407,7 -2406,7 +2407,7 @@@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *i mr->meta_length = 0; if (data_sg_nents == 1) { n++; - mr->ndescs = 1; + mr->mmkey.ndescs = 1; if (data_sg_offset) sg_offset = *data_sg_offset; mr->data_length = sg_dma_len(data_sg) - sg_offset; @@@ -2460,7 -2459,7 +2460,7 @@@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *m if (sg_offset_p) *sg_offset_p = sg_offset;
- mr->ndescs = i; + mr->mmkey.ndescs = i; mr->data_length = mr->ibmr.length;
if (meta_sg_nents) { @@@ -2493,11 -2492,11 +2493,11 @@@ static int mlx5_set_page(struct ib_mr * struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs;
- if (unlikely(mr->ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs == mr->max_descs)) return -ENOMEM;
descs = mr->descs; - descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); + descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0; } @@@ -2507,11 -2506,11 +2507,11 @@@ static int mlx5_set_page_pi(struct ib_m struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs;
- if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) return -ENOMEM;
descs = mr->descs; - descs[mr->ndescs + mr->meta_ndescs++] = + descs[mr->mmkey.ndescs + mr->meta_ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0; @@@ -2527,7 -2526,7 +2527,7 @@@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr * struct mlx5_ib_mr *pi_mr = mr->mtt_mr; int n;
- pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0;
@@@ -2561,7 -2560,7 +2561,7 @@@ * metadata offset at the first metadata page */ pi_mr->pi_iova = (iova & page_mask) + - pi_mr->ndescs * ibmr->page_size + + pi_mr->mmkey.ndescs * ibmr->page_size + (pi_mr->ibmr.iova & ~page_mask); /* * In order to use one MTT MR for data and metadata, we register @@@ -2592,7 -2591,7 +2592,7 @@@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr * struct mlx5_ib_mr *pi_mr = mr->klm_mr; int n;
- pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0;
@@@ -2627,7 -2626,7 +2627,7 @@@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *
WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
- mr->ndescs = 0; + mr->mmkey.ndescs = 0; mr->data_length = 0; mr->data_iova = 0; mr->meta_ndescs = 0; @@@ -2683,7 -2682,7 +2683,7 @@@ int mlx5_ib_map_mr_sg(struct ib_mr *ibm struct mlx5_ib_mr *mr = to_mmr(ibmr); int n;
- mr->ndescs = 0; + mr->mmkey.ndescs = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, mr->desc_size * mr->max_descs, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 1a1bebd453d3,e54f96251fea..67364ab63a1f --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@@ -137,7 -137,7 +137,7 @@@ static struct hns3_dbg_cmd_info hns3_db .name = "uc", .cmd = HNAE3_DBG_CMD_MAC_UC, .dentry = HNS3_DBG_DENTRY_MAC, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_128KB, .init = hns3_dbg_common_file_init, }, { @@@ -256,7 -256,7 +256,7 @@@ .name = "tqp", .cmd = HNAE3_DBG_CMD_REG_TQP, .dentry = HNS3_DBG_DENTRY_REG, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_128KB, .init = hns3_dbg_common_file_init, }, { @@@ -298,7 -298,7 +298,7 @@@ .name = "fd_tcam", .cmd = HNAE3_DBG_CMD_FD_TCAM, .dentry = HNS3_DBG_DENTRY_FD, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_1MB, .init = hns3_dbg_common_file_init, }, { @@@ -336,20 -336,6 +336,20 @@@ .buf_len = HNS3_DBG_READ_LEN, .init = hns3_dbg_common_file_init, }, + { + .name = "page_pool_info", + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "coalesce_info", + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, };
static struct hns3_dbg_cap_info hns3_dbg_cap[] = { @@@ -398,26 -384,6 +398,26 @@@ } };
+static const struct hns3_dbg_item coal_info_items[] = { + { "VEC_ID", 2 }, + { "ALGO_STATE", 2 }, + { "PROFILE_ID", 2 }, + { "CQE_MODE", 2 }, + { "TUNE_STATE", 2 }, + { "STEPS_LEFT", 2 }, + { "STEPS_RIGHT", 2 }, + { "TIRED", 2 }, + { "SW_GL", 2 }, + { "SW_QL", 2 }, + { "HW_GL", 2 }, + { "HW_QL", 2 }, +}; + +static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" }; +static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" }; +static const char * const +dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" }; + static void hns3_dbg_fill_content(char *content, u16 len, const struct hns3_dbg_item *items, const char **result, u16 size) @@@ -439,94 -405,6 +439,94 @@@ *pos++ = '\0'; }
+static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector, + char **result, int i, bool is_tx) +{ + unsigned int gl_offset, ql_offset; + struct hns3_enet_coalesce *coal; + unsigned int reg_val; + unsigned int j = 0; + struct dim *dim; + bool ql_enable; + + if (is_tx) { + coal = &tqp_vector->tx_group.coal; + dim = &tqp_vector->tx_group.dim; + gl_offset = HNS3_VECTOR_GL1_OFFSET; + ql_offset = HNS3_VECTOR_TX_QL_OFFSET; + ql_enable = tqp_vector->tx_group.coal.ql_enable; + } else { + coal = &tqp_vector->rx_group.coal; + dim = &tqp_vector->rx_group.dim; + gl_offset = HNS3_VECTOR_GL0_OFFSET; + ql_offset = HNS3_VECTOR_RX_QL_OFFSET; + ql_enable = tqp_vector->rx_group.coal.ql_enable; + } + + sprintf(result[j++], "%d", i); + sprintf(result[j++], "%s", dim_state_str[dim->state]); + sprintf(result[j++], "%u", dim->profile_ix); + sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]); + sprintf(result[j++], "%s", + dim_tune_stat_str[dim->tune_state]); + sprintf(result[j++], "%u", dim->steps_left); + sprintf(result[j++], "%u", dim->steps_right); + sprintf(result[j++], "%u", dim->tired); + sprintf(result[j++], "%u", coal->int_gl); + sprintf(result[j++], "%u", coal->int_ql); + reg_val = readl(tqp_vector->mask_addr + gl_offset) & + HNS3_VECTOR_GL_MASK; + sprintf(result[j++], "%u", reg_val); + if (ql_enable) { + reg_val = readl(tqp_vector->mask_addr + ql_offset) & + HNS3_VECTOR_QL_MASK; + sprintf(result[j++], "%u", reg_val); + } else { + sprintf(result[j++], "NA"); + } +} + +static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len, + int *pos, bool is_tx) +{ + char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(coal_info_items)]; + struct hns3_enet_tqp_vector *tqp_vector; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(coal_info_items); i++) + result[i] = &data_str[i][0]; + + *pos += scnprintf(buf + *pos, len - *pos, + "%s interrupt coalesce info:\n", + is_tx ? "tx" : "rx"); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + NULL, ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_get_coal_info(tqp_vector, result, i, is_tx); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + (const char **)result, + ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } +} + +static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len) +{ + int pos = 0; + + hns3_dump_coal_info(h, buf, len, &pos, true); + pos += scnprintf(buf + pos, len - pos, "\n"); + hns3_dump_coal_info(h, buf, len, &pos, false); + + return 0; +} + static const struct hns3_dbg_item tx_spare_info_items[] = { { "QUEUE_ID", 2 }, { "COPYBREAK", 2 }, @@@ -584,7 -462,7 +584,7 @@@ static const struct hns3_dbg_item rx_qu { "TAIL", 2 }, { "HEAD", 2 }, { "FBDNUM", 2 }, - { "PKTNUM", 2 }, + { "PKTNUM", 5 }, { "COPYBREAK", 2 }, { "RING_EN", 2 }, { "RX_RING_EN", 2 }, @@@ -687,7 -565,7 +687,7 @@@ static const struct hns3_dbg_item tx_qu { "HEAD", 2 }, { "FBDNUM", 2 }, { "OFFSET", 2 }, - { "PKTNUM", 2 }, + { "PKTNUM", 5 }, { "RING_EN", 2 }, { "TX_RING_EN", 2 }, { "BASE_ADDR", 10 }, @@@ -912,13 -790,13 +912,13 @@@ static int hns3_dbg_rx_bd_info(struct h }
static const struct hns3_dbg_item tx_bd_info_items[] = { - { "BD_IDX", 5 }, - { "ADDRESS", 2 }, + { "BD_IDX", 2 }, + { "ADDRESS", 13 }, { "VLAN_TAG", 2 }, { "SIZE", 2 }, { "T_CS_VLAN_TSO", 2 }, { "OT_VLAN_TAG", 3 }, - { "TV", 2 }, + { "TV", 5 }, { "OLT_VLAN_LEN", 2 }, { "PAYLEN_OL4CS", 2 }, { "BD_FE_SC_VLD", 2 }, @@@ -1046,12 -924,6 +1046,12 @@@ hns3_dbg_dev_specs(struct hnae3_handle dev_specs->max_tm_rate); *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", dev_specs->max_qset_num); + *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", + dev_specs->umv_size); + *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", + dev_specs->mc_mac_size); + *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n", + dev_specs->mac_stats_num); }
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) @@@ -1065,69 -937,6 +1065,69 @@@ return 0; }
+static const struct hns3_dbg_item page_pool_info_items[] = { + { "QUEUE_ID", 2 }, + { "ALLOCATE_CNT", 2 }, + { "FREE_CNT", 6 }, + { "POOL_SIZE(PAGE_NUM)", 2 }, + { "ORDER", 2 }, + { "NUMA_ID", 2 }, + { "MAX_LEN", 2 }, +}; + +static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, + char **result, u32 index) +{ + u32 j = 0; + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt); + sprintf(result[j++], "%u", + atomic_read(&ring->page_pool->pages_state_release_cnt)); + sprintf(result[j++], "%u", ring->page_pool->p.pool_size); + sprintf(result[j++], "%u", ring->page_pool->p.order); + sprintf(result[j++], "%d", ring->page_pool->p.nid); + sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024); +} + +static int +hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) +{ + char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(page_pool_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items, + NULL, ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_page_pool_info(ring, result, i); + hns3_dbg_fill_content(content, sizeof(content), + page_pool_info_items, + (const char **)result, + ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) { u32 i; @@@ -1169,14 -978,6 +1169,14 @@@ static const struct hns3_dbg_func hns3_ .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, .dbg_dump = hns3_dbg_tx_queue_info, }, + { + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dbg_dump = hns3_dbg_page_pool_info, + }, + { + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dbg_dump = hns3_dbg_coal_info, + }, };
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index f0aa4fbd2200,9cda8b3562b8..4e0a8c2f7c05 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@@ -391,7 -391,7 +391,7 @@@ static int hclge_dbg_dump_mac(struct hc static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u16 qset_id, qset_num; int ret; @@@ -408,12 -408,12 +408,12 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "%04u %#x %#x %#x %#x\n", - qset_id, bitmap->bit0, bitmap->bit1, - bitmap->bit2, bitmap->bit3); + qset_id, req.bit0, req.bit1, req.bit2, + req.bit3); }
return 0; @@@ -422,7 -422,7 +422,7 @@@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u8 pri_id, pri_num; int ret; @@@ -439,12 -439,11 +439,11 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "%03u %#x %#x %#x\n", - pri_id, bitmap->bit0, bitmap->bit1, - bitmap->bit2); + pri_id, req.bit0, req.bit1, req.bit2); }
return 0; @@@ -453,7 -452,7 +452,7 @@@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u8 pg_id; int ret; @@@ -466,12 -465,11 +465,11 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "%03u %#x %#x %#x\n", - pg_id, bitmap->bit0, bitmap->bit1, - bitmap->bit2); + pg_id, req.bit0, req.bit1, req.bit2); }
return 0; @@@ -511,7 -509,7 +509,7 @@@ static int hclge_dbg_dump_dcb_queue(str static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u8 port_id = 0; int ret; @@@ -521,12 -519,12 +519,12 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", - bitmap->bit0); + req.bit0); *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", - bitmap->bit1); + req.bit1);
return 0; } @@@ -1992,9 -1990,6 +1990,9 @@@ static int hclge_dbg_dump_umv_info(stru } mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + return 0; }
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f1db6699f81f,d891390d492f..2e41aa2d1df8 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@@ -156,210 -156,174 +156,210 @@@ static const char hns3_nic_test_strs[][ };
static const struct hclge_comm_stats_str g_mac_stats_string[] = { - {"mac_tx_mac_pause_num", + {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, - {"mac_rx_mac_pause_num", + {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, - {"mac_tx_control_pkt_num", + {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, + {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, + {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, - {"mac_rx_control_pkt_num", + {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, - {"mac_tx_pfc_pkt_num", + {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, - {"mac_tx_pfc_pri0_pkt_num", + {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, - {"mac_tx_pfc_pri1_pkt_num", + {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, - {"mac_tx_pfc_pri2_pkt_num", + {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, - {"mac_tx_pfc_pri3_pkt_num", + {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, - {"mac_tx_pfc_pri4_pkt_num", + {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, - {"mac_tx_pfc_pri5_pkt_num", + {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, - {"mac_tx_pfc_pri6_pkt_num", + {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, - {"mac_tx_pfc_pri7_pkt_num", + {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, - {"mac_rx_pfc_pkt_num", + {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, + {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, + {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, + {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, + {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, + {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, + {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, + {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, + {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, - {"mac_rx_pfc_pri0_pkt_num", + {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, - {"mac_rx_pfc_pri1_pkt_num", + {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, - {"mac_rx_pfc_pri2_pkt_num", + {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, - {"mac_rx_pfc_pri3_pkt_num", + {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, - {"mac_rx_pfc_pri4_pkt_num", + {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, - {"mac_rx_pfc_pri5_pkt_num", + {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, - {"mac_rx_pfc_pri6_pkt_num", + {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, - {"mac_rx_pfc_pri7_pkt_num", + {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, - {"mac_tx_total_pkt_num", + {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, + {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, + {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, + {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, + {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, + {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, + {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, + {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, + {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, - {"mac_tx_total_oct_num", + {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, - {"mac_tx_good_pkt_num", + {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, - {"mac_tx_bad_pkt_num", + {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, - {"mac_tx_good_oct_num", + {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, - {"mac_tx_bad_oct_num", + {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, - {"mac_tx_uni_pkt_num", + {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, - {"mac_tx_multi_pkt_num", + {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, - {"mac_tx_broad_pkt_num", + {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, - {"mac_tx_undersize_pkt_num", + {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, - {"mac_tx_oversize_pkt_num", + {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, - {"mac_tx_64_oct_pkt_num", + {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, - {"mac_tx_65_127_oct_pkt_num", + {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, - {"mac_tx_128_255_oct_pkt_num", + {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, - {"mac_tx_256_511_oct_pkt_num", + {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, - {"mac_tx_512_1023_oct_pkt_num", + {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, - {"mac_tx_1024_1518_oct_pkt_num", + {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, - {"mac_tx_1519_2047_oct_pkt_num", + {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, - {"mac_tx_2048_4095_oct_pkt_num", + {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, - {"mac_tx_4096_8191_oct_pkt_num", + {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, - {"mac_tx_8192_9216_oct_pkt_num", + {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, - {"mac_tx_9217_12287_oct_pkt_num", + {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, - {"mac_tx_12288_16383_oct_pkt_num", + {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, - {"mac_tx_1519_max_good_pkt_num", + {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, - {"mac_tx_1519_max_bad_pkt_num", + {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, - {"mac_rx_total_pkt_num", + {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, - {"mac_rx_total_oct_num", + {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, - {"mac_rx_good_pkt_num", + {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, - {"mac_rx_bad_pkt_num", + {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, - {"mac_rx_good_oct_num", + {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, - {"mac_rx_bad_oct_num", + {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, - {"mac_rx_uni_pkt_num", + {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, - {"mac_rx_multi_pkt_num", + {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, - {"mac_rx_broad_pkt_num", + {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, - {"mac_rx_undersize_pkt_num", + {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, - {"mac_rx_oversize_pkt_num", + {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, - {"mac_rx_64_oct_pkt_num", + {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, - {"mac_rx_65_127_oct_pkt_num", + {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, - {"mac_rx_128_255_oct_pkt_num", + 
{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, - {"mac_rx_256_511_oct_pkt_num", + {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, - {"mac_rx_512_1023_oct_pkt_num", + {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, - {"mac_rx_1024_1518_oct_pkt_num", + {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, - {"mac_rx_1519_2047_oct_pkt_num", + {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, - {"mac_rx_2048_4095_oct_pkt_num", + {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, - {"mac_rx_4096_8191_oct_pkt_num", + {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, - {"mac_rx_8192_9216_oct_pkt_num", + {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, - {"mac_rx_9217_12287_oct_pkt_num", + {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, - {"mac_rx_12288_16383_oct_pkt_num", + {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, - {"mac_rx_1519_max_good_pkt_num", + {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, - {"mac_rx_1519_max_bad_pkt_num", + {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num", + {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, - {"mac_tx_undermin_pkt_num", + {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, - {"mac_tx_jabber_pkt_num", + {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, - {"mac_tx_err_all_pkt_num", + {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, - {"mac_tx_from_app_good_pkt_num", + {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, - {"mac_tx_from_app_bad_pkt_num", + {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, - {"mac_rx_fragment_pkt_num", + {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, - {"mac_rx_undermin_pkt_num", + {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, - {"mac_rx_jabber_pkt_num", + {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, - {"mac_rx_fcs_err_pkt_num", + {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, - {"mac_rx_send_app_good_pkt_num", + {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, - {"mac_rx_send_app_bad_pkt_num", + {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} };
@@@ -487,9 -451,8 +487,9 @@@ static int hclge_mac_update_stats_defec u64 *data = (u64 *)(&hdev->mac_stats); struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; __le64 *desc_data; - int i, k, n; + u32 data_size; int ret; + u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); @@@ -500,37 -463,33 +500,37 @@@ return ret; }
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { - /* for special opcode 0032, only the first desc has the head */ - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RD_FIRST_STATS_NUM; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RD_OTHER_STATS_NUM; - } + /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */ + data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) { - *data += le64_to_cpu(*desc_data); - data++; - desc_data++; - } + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous because only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; }
return 0; }
-static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) { +#define HCLGE_REG_NUM_PER_DESC 4 + + u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; u64 *data = (u64 *)(&hdev->mac_stats); struct hclge_desc *desc; __le64 *desc_data; - u16 i, k, n; + u32 data_size; + u32 desc_num; int ret; + u32 i; + + /* The first desc has a 64-bit header, so it must be accounted for */ + desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections, * so GFP_ATOMIC is more suitable here @@@ -546,16 -505,21 +546,16 @@@ return ret; }
- for (i = 0; i < desc_num; i++) { - /* for special opcode 0034, only the first desc has the head */ - if (i == 0) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RD_FIRST_STATS_NUM; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RD_OTHER_STATS_NUM; - } + data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) { - *data += le64_to_cpu(*desc_data); - data++; - desc_data++; - } + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous because only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; }
kfree(desc); @@@ -563,37 -527,42 +563,37 @@@ return 0; }
-static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) { struct hclge_desc desc; - __le32 *desc_data; - u32 reg_num; int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query mac statistic reg number, ret = %d\n", + ret); return ret; + }
- desc_data = (__le32 *)(&desc.data[0]); - reg_num = le32_to_cpu(*desc_data); - - *desc_num = 1 + ((reg_num - 3) >> 2) + - (u32)(((reg_num - 3) & 0x3) ? 1 : 0); + *reg_num = le32_to_cpu(desc.data[0]); + if (*reg_num == 0) { + dev_err(&hdev->pdev->dev, + "mac statistic reg number is invalid!\n"); + return -ENODATA; + }
return 0; }
static int hclge_mac_update_stats(struct hclge_dev *hdev) { - u32 desc_num; - int ret; - - ret = hclge_mac_query_reg_num(hdev, &desc_num); /* The firmware supports the new statistics acquisition method */ - if (!ret) - ret = hclge_mac_update_stats_complete(hdev, desc_num); - else if (ret == -EOPNOTSUPP) - ret = hclge_mac_update_stats_defective(hdev); + if (hdev->ae_dev->dev_specs.mac_stats_num) + return hclge_mac_update_stats_complete(hdev); else - dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); - - return ret; + return hclge_mac_update_stats_defective(hdev); }
static int hclge_tqps_update_stats(struct hnae3_handle *handle) @@@ -701,39 -670,20 +701,39 @@@ static u8 *hclge_tqps_get_strings(struc return buff; }
-static u64 *hclge_comm_get_stats(const void *comm_stats, +static int hclge_comm_get_count(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + u32 size) +{ + int count = 0; + u32 i; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) + count++; + + return count; +} + +static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, const struct hclge_comm_stats_str strs[], int size, u64 *data) { u64 *buf = data; u32 i;
- for (i = 0; i < size; i++) - buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + + *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); + buf++; + }
- return buf + size; + return buf; }
-static u8 *hclge_comm_get_strings(u32 stringset, +static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, const struct hclge_comm_stats_str strs[], int size, u8 *data) { @@@ -744,9 -694,6 +744,9 @@@ return buff;
for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); buff = buff + ETH_GSTRING_LEN; } @@@ -838,8 -785,7 +838,8 @@@ static int hclge_get_sset_count(struct handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } } else if (stringset == ETH_SS_STATS) { - count = ARRAY_SIZE(g_mac_stats_string) + + count = hclge_comm_get_count(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string)) + hclge_tqps_get_sset_count(handle, stringset); }
@@@ -849,14 -795,12 +849,14 @@@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, u8 *data) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; u8 *p = (char *)data; int size;
if (stringset == ETH_SS_STATS) { size = ARRAY_SIZE(g_mac_stats_string); - p = hclge_comm_get_strings(stringset, g_mac_stats_string, + p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, size, p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { @@@ -890,7 -834,7 +890,7 @@@ static void hclge_get_stats(struct hnae struct hclge_dev *hdev = vport->back; u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string, + p = hclge_comm_get_stats(hdev, g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); p = hclge_tqps_get_stats(handle, p); } @@@ -1093,100 -1037,96 +1093,100 @@@ static int hclge_check_port_speed(struc return -EINVAL; }
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_sr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - mac->supported); + link_mode); }
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_lr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit( ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - mac->supported); + link_mode); }
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_cr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, - mac->supported); + link_mode); }
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability) +static void hclge_convert_setting_kr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_1G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - mac->supported); + link_mode); }
static void hclge_convert_setting_fec(struct hclge_mac *mac) @@@ -1230,9 -1170,9 +1230,9 @@@ static void hclge_parse_fiber_link_mode linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, mac->supported);
- hclge_convert_setting_sr(mac, speed_ability); - hclge_convert_setting_lr(mac, speed_ability); - hclge_convert_setting_cr(mac, speed_ability); + hclge_convert_setting_sr(speed_ability, mac->supported); + hclge_convert_setting_lr(speed_ability, mac->supported); + hclge_convert_setting_cr(speed_ability, mac->supported); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac);
@@@ -1248,7 -1188,7 +1248,7 @@@ static void hclge_parse_backplane_link_ { struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability); + hclge_convert_setting_kr(speed_ability, mac->supported); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac);
@@@ -1402,6 -1342,8 +1402,6 @@@ static void hclge_parse_cfg(struct hclg cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_UMV_TBL_SPACE_M, HCLGE_CFG_UMV_TBL_SPACE_S); - if (!cfg->umv_space) - cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), HCLGE_CFG_PF_RSS_SIZE_M, @@@ -1477,7 -1419,6 +1477,7 @@@ static void hclge_set_default_dev_specs ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; }
static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@@ -1499,8 -1440,6 +1499,8 @@@ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); }
static void hclge_check_dev_specs(struct hclge_dev *hdev) @@@ -1521,21 -1460,6 +1521,21 @@@ dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; if (!dev_specs->max_frm_size) dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; +} + +static int hclge_query_mac_stats_num(struct hclge_dev *hdev) +{ + u32 reg_num = 0; + int ret; + + ret = hclge_mac_query_reg_num(hdev, &reg_num); + if (ret && ret != -EOPNOTSUPP) + return ret; + + hdev->ae_dev->dev_specs.mac_stats_num = reg_num; + return 0; }
static int hclge_query_dev_specs(struct hclge_dev *hdev) @@@ -1544,10 -1468,6 +1544,10 @@@ int ret; int i;
+ ret = hclge_query_mac_stats_num(hdev); + if (ret) + return ret; + /* set default specifications as devices lower than version V3 do not * support querying specifications from firmware. */ @@@ -1629,10 -1549,7 +1629,10 @@@ static int hclge_configure(struct hclge hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; - hdev->wanted_umv_size = cfg.umv_space; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) @@@ -2930,33 -2847,29 +2930,29 @@@ static void hclge_mbx_task_schedule(str { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, 0); + mod_delayed_work(hclge_wq, &hdev->service_task, 0); }
static void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, 0); + mod_delayed_work(hclge_wq, &hdev->service_task, 0); }
static void hclge_errhand_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, 0); + mod_delayed_work(hclge_wq, &hdev->service_task, 0); }
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, - delay_time); + mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); }
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) @@@ -3051,82 -2964,6 +3047,82 @@@ static void hclge_update_link_status(st clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); }
+static void hclge_update_speed_advertising(struct hclge_mac *mac) +{ + u32 speed_ability; + + if (hclge_get_speed_bit(mac->speed, &speed_ability)) + return; + + switch (mac->module_type) { + case HNAE3_MODULE_TYPE_FIBRE_LR: + hclge_convert_setting_lr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_FIBRE_SR: + case HNAE3_MODULE_TYPE_AOC: + hclge_convert_setting_sr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_CR: + hclge_convert_setting_cr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_KR: + hclge_convert_setting_kr(speed_ability, mac->advertising); + break; + default: + break; + } +} + +static void hclge_update_fec_advertising(struct hclge_mac *mac) +{ + if (mac->fec_mode & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->advertising); + else + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->advertising); +} + +static void hclge_update_pause_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + bool rx_en, tx_en; + + switch (hdev->fc_mode_last_time) { + case HCLGE_FC_RX_PAUSE: + rx_en = true; + tx_en = false; + break; + case HCLGE_FC_TX_PAUSE: + rx_en = false; + tx_en = true; + break; + case HCLGE_FC_FULL: + rx_en = true; + tx_en = true; + break; + default: + rx_en = false; + tx_en = false; + break; + } + + linkmode_set_pause(mac->advertising, tx_en, rx_en); +} + +static void hclge_update_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + linkmode_zero(mac->advertising); + hclge_update_speed_advertising(mac); + hclge_update_fec_advertising(mac); + hclge_update_pause_advertising(hdev); +} + static void hclge_update_port_capability(struct hclge_dev *hdev, struct hclge_mac *mac) { @@@ -3149,7 -2986,7 +3145,7 @@@ } else { linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); - linkmode_zero(mac->advertising); + hclge_update_advertising(hdev); } }
@@@ -3650,33 -3487,14 +3646,14 @@@ static void hclge_get_misc_vector(struc hdev->num_msi_used += 1; }
- static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) - { - struct hclge_dev *hdev = container_of(notify, struct hclge_dev, - affinity_notify); - - cpumask_copy(&hdev->affinity_mask, mask); - } - - static void hclge_irq_affinity_release(struct kref *ref) - { - } - static void hclge_misc_affinity_setup(struct hclge_dev *hdev) { irq_set_affinity_hint(hdev->misc_vector.vector_irq, &hdev->affinity_mask); - - hdev->affinity_notify.notify = hclge_irq_affinity_notify; - hdev->affinity_notify.release = hclge_irq_affinity_release; - irq_set_affinity_notifier(hdev->misc_vector.vector_irq, - &hdev->affinity_notify); }
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) { - irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); }
@@@ -8657,9 -8475,6 +8634,9 @@@ static int hclge_init_umv_space(struct hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + return 0; }
@@@ -8677,8 -8492,6 +8654,8 @@@ static void hclge_reset_umv_space(struc hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; }
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) @@@ -8933,7 -8746,6 +8910,7 @@@ int hclge_add_mc_addr_common(struct hcl struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; + bool is_new_addr = false; int status;
/* mac addr check */ @@@ -8947,13 -8759,6 +8924,13 @@@ hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + /* This mac addr do not exist, add new entry for it */ memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data)); @@@ -8963,18 -8768,12 +8940,18 @@@ if (status) return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - /* if already overflow, not to print each time */ - if (status == -ENOSPC && - !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) - dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + if (status == -ENOSPC) + goto err_no_space; + else if (!status && is_new_addr) + hdev->used_mc_mac_num++;
return status; + +err_no_space: + /* if already overflow, not to print each time */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + return -ENOSPC; }
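The hunks above bound the multicast table with a used_mc_mac_num counter: a new entry is rejected once dev_specs.mc_mac_size is reached, the counter is incremented only on a successful new add, and (in the removal path below) decremented when the last function reference goes away. A standalone sketch of that capacity accounting in plain C, with illustrative names:

	#include <stdio.h>

	struct mc_table {
		unsigned int used;     /* entries currently allocated */
		unsigned int capacity; /* 0 means "no limit advertised" */
	};

	/* returns 0 on success, -1 when the table is full */
	static int mc_table_add(struct mc_table *t, int is_new_entry)
	{
		if (is_new_entry && t->capacity && t->used >= t->capacity)
			return -1;
		if (is_new_entry)
			t->used++;
		return 0;
	}

	static void mc_table_del(struct mc_table *t)
	{
		if (t->used)
			t->used--;
	}

	int main(void)
	{
		struct mc_table t = { .used = 0, .capacity = 2 };

		printf("%d\n", mc_table_add(&t, 1)); /* 0 */
		printf("%d\n", mc_table_add(&t, 1)); /* 0 */
		printf("%d\n", mc_table_add(&t, 1)); /* -1: full */
		mc_table_del(&t);
		printf("%d\n", mc_table_add(&t, 1)); /* 0 again */
		return 0;
	}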
static int hclge_rm_mc_addr(struct hnae3_handle *handle, @@@ -9011,15 -8810,12 +8988,15 @@@ int hclge_rm_mc_addr_common(struct hclg if (status) return status;
- if (hclge_is_all_function_id_zero(desc)) + if (hclge_is_all_function_id_zero(desc)) { /* All the vfid is zero, so need to delete this entry */ status = hclge_remove_mac_vlan_tbl(vport, &req); - else + if (!status) + hdev->used_mc_mac_num--; + } else { /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } } else if (status == -ENOENT) { status = 0; } @@@ -9595,7 -9391,7 +9572,7 @@@ int hclge_update_mac_node_for_dev_addr( return 0; }
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, +static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; @@@ -13233,7 -13029,7 +13210,7 @@@ static int hclge_init(void { pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME); + hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); if (!hclge_wq) { pr_err("%s: failed to create workqueue\n", HCLGE_NAME); return -ENOMEM; diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 4f8403af84be,69cd8f87b4c8..9e1eede599ec --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@@ -403,13 -403,8 +403,13 @@@ struct hclge_tm_info u8 pfc_en; /* PFC enabled or not for user priority */ };
+/* max number of mac statistics on each version */ +#define HCLGE_MAC_STATS_MAX_NUM_V1 84 +#define HCLGE_MAC_STATS_MAX_NUM_V2 105 + struct hclge_comm_stats_str { char desc[ETH_GSTRING_LEN]; + u32 stats_num; unsigned long offset; };
@@@ -417,7 -412,6 +417,7 @@@ struct hclge_mac_stats { u64 mac_tx_mac_pause_num; u64 mac_rx_mac_pause_num; + u64 rsv0; u64 mac_tx_pfc_pri0_pkt_num; u64 mac_tx_pfc_pri1_pkt_num; u64 mac_tx_pfc_pri2_pkt_num; @@@ -454,7 -448,7 +454,7 @@@ u64 mac_tx_1519_2047_oct_pkt_num; u64 mac_tx_2048_4095_oct_pkt_num; u64 mac_tx_4096_8191_oct_pkt_num; - u64 rsv0; + u64 rsv1; u64 mac_tx_8192_9216_oct_pkt_num; u64 mac_tx_9217_12287_oct_pkt_num; u64 mac_tx_12288_16383_oct_pkt_num; @@@ -481,7 -475,7 +481,7 @@@ u64 mac_rx_1519_2047_oct_pkt_num; u64 mac_rx_2048_4095_oct_pkt_num; u64 mac_rx_4096_8191_oct_pkt_num; - u64 rsv1; + u64 rsv2; u64 mac_rx_8192_9216_oct_pkt_num; u64 mac_rx_9217_12287_oct_pkt_num; u64 mac_rx_12288_16383_oct_pkt_num; @@@ -504,28 -498,6 +504,28 @@@ u64 mac_rx_pfc_pause_pkt_num; u64 mac_tx_ctrl_pkt_num; u64 mac_rx_ctrl_pkt_num; + + /* duration of pfc */ + u64 mac_tx_pfc_pri0_xoff_time; + u64 mac_tx_pfc_pri1_xoff_time; + u64 mac_tx_pfc_pri2_xoff_time; + u64 mac_tx_pfc_pri3_xoff_time; + u64 mac_tx_pfc_pri4_xoff_time; + u64 mac_tx_pfc_pri5_xoff_time; + u64 mac_tx_pfc_pri6_xoff_time; + u64 mac_tx_pfc_pri7_xoff_time; + u64 mac_rx_pfc_pri0_xoff_time; + u64 mac_rx_pfc_pri1_xoff_time; + u64 mac_rx_pfc_pri2_xoff_time; + u64 mac_rx_pfc_pri3_xoff_time; + u64 mac_rx_pfc_pri4_xoff_time; + u64 mac_rx_pfc_pri5_xoff_time; + u64 mac_rx_pfc_pri6_xoff_time; + u64 mac_rx_pfc_pri7_xoff_time; + + /* duration of pause */ + u64 mac_tx_pause_xoff_time; + u64 mac_rx_pause_xoff_time; };
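With stats_num added to hclge_comm_stats_str and the HCLGE_MAC_STATS_MAX_NUM_V1/V2 limits above, the driver can skip counters the running firmware does not report. A hedged sketch of filtering such a descriptor table against a firmware-reported count (the names and table contents here are illustrative, not the driver's actual table):

	#include <stddef.h>
	#include <stdio.h>

	struct stat_desc {
		const char *name;
		unsigned int stats_num; /* first descriptor count that includes this stat */
	};

	static const struct stat_desc descs[] = {
		{ "mac_tx_total_pkt_num",       84 },  /* present since the V1 layout */
		{ "mac_tx_pfc_pri0_xoff_time", 105 },  /* added with the V2 layout */
	};

	/* expose a stat only if the firmware reports at least stats_num counters */
	static void dump_supported(unsigned int fw_stats_num)
	{
		size_t i;

		for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
			if (descs[i].stats_num > fw_stats_num)
				continue;
			printf("%s\n", descs[i].name);
		}
	}

	int main(void)
	{
		dump_supported(84);  /* V1 device: xoff_time stats skipped */
		dump_supported(105); /* V2 device: everything shown */
		return 0;
	}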
#define HCLGE_STATS_TIMER_INTERVAL 300UL @@@ -966,15 -938,12 +966,14 @@@ struct hclge_dev u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE);
/* affinity mask and notify for misc interrupt */ cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; struct hclge_ptp *ptp; struct devlink *devlink; }; diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 3306050ad72c,cf00ad7bb881..645b2c0011e6 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@@ -1349,7 -1349,7 +1349,7 @@@ static void hclgevf_get_mac_addr(struc ether_addr_copy(p, hdev->hw.mac.mac_addr); }
-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@@ -2232,6 -2232,7 +2232,7 @@@ static void hclgevf_get_misc_vector(str void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) { if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); @@@ -3449,6 -3450,8 +3450,8 @@@ static int hclgevf_init_hdev(struct hcl
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); + hdev->last_reset_time = jiffies; dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); @@@ -3899,7 -3902,7 +3902,7 @@@ static int hclgevf_init(void { pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); + hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); if (!hclgevf_wq) { pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); return -ENOMEM; diff --combined drivers/net/ethernet/intel/ice/ice_ptp.c index a1be0d04a2d0,d1ef3d48a4b0..bf7247c6f58e --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@@ -6,252 -6,6 +6,252 @@@
#define E810_OUT_PROP_DELAY_NS 1
+static const struct ptp_pin_desc ice_pin_desc_e810t[] = { + /* name idx func chan */ + { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, + { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, + { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, + { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, + { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, +}; + +/** + * ice_get_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc structure + * + * Read the configuration of the SMA control logic and put it into the + * ptp_pin_desc structure + */ +static int +ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) +{ + u8 data, i; + int status; + + /* Read initial pin state */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* initialize with defaults */ + for (i = 0; i < NUM_PTP_PINS_E810T; i++) { + snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), + "%s", ice_pin_desc_e810t[i].name); + ptp_pins[i].index = ice_pin_desc_e810t[i].index; + ptp_pins[i].func = ice_pin_desc_e810t[i].func; + ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; + } + + /* Parse SMA1/UFL1 */ + switch (data & ICE_SMA1_MASK_E810T) { + case ICE_SMA1_MASK_E810T: + default: + ptp_pins[SMA1].func = PTP_PF_NONE; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_DIR_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_PEROUT; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_TX_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case 0: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_PEROUT; + break; + } + + /* Parse SMA2/UFL2 */ + switch (data & ICE_SMA2_MASK_E810T) { + case ICE_SMA2_MASK_E810T: + default: + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_EXTTS; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + case ICE_SMA2_DIR_EN_E810T: + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + } + + return 0; +} + +/** + * ice_ptp_set_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc structure + * + * Set the configuration of the SMA control logic based on the configuration in + * the ptp_pins parameter + */ +static int +ice_ptp_set_sma_config_e810t(struct ice_hw *hw, + const struct ptp_pin_desc *ptp_pins) +{ + int status; + u8 data; + + /* SMA1 and UFL1 cannot be set to TX at the same time */ + if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_PEROUT) + return -EINVAL; + + /* SMA2 and UFL2 cannot be set to RX at the same time */ + if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_EXTTS) + return -EINVAL; + + /* Read initial pin state value */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* Set the right state based on the desired configuration */ + data &= ~ICE_SMA1_MASK_E810T; + if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); + data |= ICE_SMA1_MASK_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_NONE) { + 
dev_info(ice_hw_to_dev(hw), "SMA1 RX"); + data |= ICE_SMA1_TX_EN_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + /* U.FL 1 TX will always enable SMA 1 RX */ + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 TX"); + data |= ICE_SMA1_DIR_EN_E810T; + } + + data &= ~ICE_SMA2_MASK_E810T; + if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); + data |= ICE_SMA2_MASK_E810T; + } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 RX"); + data |= (ICE_SMA2_TX_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "UFL2 RX"); + data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX"); + data |= (ICE_SMA2_DIR_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); + data |= ICE_SMA2_DIR_EN_E810T; + } + + return ice_write_sma_ctrl_e810t(hw, data); +} + +/** + * ice_ptp_set_sma_e810t + * @info: the driver's PTP info structure + * @pin: pin index in kernel structure + * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) + * + * Set the configuration of a single SMA pin + */ +static int +ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func) +{ + struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_hw *hw = &pf->hw; + int err; + + if (pin < SMA1 || func > PTP_PF_PEROUT) + return -EOPNOTSUPP; + + err = ice_get_sma_config_e810t(hw, ptp_pins); + if (err) + return err; + + /* Disable the same function on the other pin sharing the channel */ + if (pin == SMA1 && ptp_pins[UFL1].func == func) + ptp_pins[UFL1].func = PTP_PF_NONE; + if (pin == UFL1 && ptp_pins[SMA1].func == func) + ptp_pins[SMA1].func = PTP_PF_NONE; + + if (pin == SMA2 && ptp_pins[UFL2].func == func) + ptp_pins[UFL2].func = PTP_PF_NONE; + if (pin == UFL2 && ptp_pins[SMA2].func == func) + ptp_pins[SMA2].func = PTP_PF_NONE; + + /* Set up new pin function in the temp table */ + ptp_pins[pin].func = func; + + return ice_ptp_set_sma_config_e810t(hw, ptp_pins); +} + +/** + * ice_verify_pin_e810t + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Verify that the pin supports the requested function and check pin consistency. 
+ * Reconfigure the SMA logic attached to the given pin to enable its + * desired functionality + */ +static int +ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + /* Don't allow channel reassignment */ + if (chan != ice_pin_desc_e810t[pin].chan) + return -EOPNOTSUPP; + + /* Check if functions are properly assigned */ + switch (func) { + case PTP_PF_NONE: + break; + case PTP_PF_EXTTS: + if (pin == UFL1) + return -EOPNOTSUPP; + break; + case PTP_PF_PEROUT: + if (pin == UFL2 || pin == GNSS) + return -EOPNOTSUPP; + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + + return ice_ptp_set_sma_e810t(info, pin, func); +} + /** * ice_set_tx_tstamp - Enable or disable Tx timestamping * @pf: The PF pointer to search in @@@ -981,34 -735,17 +981,34 @@@ ice_ptp_gpio_enable_e810(struct ptp_clo { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_perout_channel clk_cfg = {0}; + bool sma_pres = false; unsigned int chan; u32 gpio_pin; int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + sma_pres = true; + switch (rq->type) { case PTP_CLK_REQ_PEROUT: chan = rq->perout.index; - if (chan == PPS_CLK_GEN_CHAN) + if (sma_pres) { + if (chan == ice_pin_desc_e810t[SMA1].chan) + clk_cfg.gpio_pin = GPIO_20; + else if (chan == ice_pin_desc_e810t[SMA2].chan) + clk_cfg.gpio_pin = GPIO_22; + else + return -1; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + clk_cfg.gpio_pin = GPIO_20; + else + clk_cfg.gpio_pin = GPIO_22; + } else if (chan == PPS_CLK_GEN_CHAN) { clk_cfg.gpio_pin = PPS_PIN_INDEX; - else + } else { clk_cfg.gpio_pin = chan; + }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + rq->perout.period.nsec); @@@ -1020,19 -757,7 +1020,19 @@@ break; case PTP_CLK_REQ_EXTTS: chan = rq->extts.index; - gpio_pin = chan; + if (sma_pres) { + if (chan < ice_pin_desc_e810t[SMA2].chan) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else { + gpio_pin = chan; + }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, rq->extts.flags); @@@ -1287,7 -1012,7 +1287,7 @@@ int ice_ptp_set_ts_config(struct ice_p * The timestamp is in ns, so we must convert the result first. */ void -ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, +ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { u32 ts_high; @@@ -1312,94 -1037,14 +1312,94 @@@ } }
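The PEROUT and EXTTS branches above map channel 0 to GPIO_20/GPIO_21 and channel 1 to GPIO_22/GPIO_23 when SMA control or an E810-T board is present. The same two-channel mapping as a standalone plain-C sketch (the pin numbers follow the hunk; the helper names are made up):

	#include <stdio.h>

	/* pin numbers as used in the hunk above */
	enum { GPIO_20 = 20, GPIO_21 = 21, GPIO_22 = 22, GPIO_23 = 23 };

	/* channel -> output pin (periodic output) */
	static int perout_gpio(unsigned int chan)
	{
		return chan == 0 ? GPIO_20 : GPIO_22;
	}

	/* channel -> input pin (external timestamp) */
	static int extts_gpio(unsigned int chan)
	{
		return chan == 0 ? GPIO_21 : GPIO_23;
	}

	int main(void)
	{
		printf("perout ch0 -> GPIO %d, ch1 -> GPIO %d\n",
		       perout_gpio(0), perout_gpio(1));
		printf("extts  ch0 -> GPIO %d, ch1 -> GPIO %d\n",
		       extts_gpio(0), extts_gpio(1));
		return 0;
	}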
+/** + * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Disable the OS access to the SMA pins. Called to clear out the OS + * indications of pin support when we fail to setup the E810-T SMA control + * register. + */ +static void +ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + + dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + + info->enable = NULL; + info->verify = NULL; + info->n_pins = 0; + info->n_ext_ts = 0; + info->n_per_out = 0; +} + +/** + * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Finish setting up the SMA pins by allocating pin_config, and setting it up + * according to the current status of the SMA. On failure, disable all of the + * extended SMA pin support. + */ +static void +ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Allocate memory for kernel pins interface */ + info->pin_config = devm_kcalloc(dev, info->n_pins, + sizeof(*info->pin_config), GFP_KERNEL); + if (!info->pin_config) { + ice_ptp_disable_sma_pins_e810t(pf, info); + return; + } + + /* Read current SMA status */ + err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); + if (err) + ice_ptp_disable_sma_pins_e810t(pf, info); +} + +/** + * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ +static void +ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +{ + /* Check if SMA controller is in the netlist */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) && + !ice_is_pca9575_present(&pf->hw)) + ice_clear_feature_support(pf, ICE_F_SMA_CTRL); + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + info->n_ext_ts = N_EXT_TS_E810_NO_SMA; + info->n_per_out = N_PER_OUT_E810T_NO_SMA; + return; + } + + info->n_per_out = N_PER_OUT_E810T; + info->n_ext_ts = N_EXT_TS_E810; + info->n_pins = NUM_PTP_PINS_E810T; + info->verify = ice_verify_pin_e810t; + + /* Complete setup of the SMA pins */ + ice_ptp_setup_sma_pins_e810t(pf, info); +} + /** * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs * @info: PTP clock capabilities */ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info) { - info->n_per_out = E810_N_PER_OUT; - info->n_ext_ts = E810_N_EXT_TS; + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; }
/** @@@ -1417,10 -1062,7 +1417,10 @@@ ice_ptp_set_funcs_e810(struct ice_pf *p { info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info); + if (ice_is_e810t(&pf->hw)) + ice_ptp_setup_pins_e810t(pf, info); + else + ice_ptp_setup_pins_e810(info); }
/** @@@ -1929,6 -1571,9 +1929,9 @@@ err_kworker */ void ice_ptp_release(struct ice_pf *pf) { + if (!test_bit(ICE_FLAG_PTP, pf->flags)) + return; + /* Disable timestamping for both Tx and Rx */ ice_ptp_cfg_timestamp(pf, false);
diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 94d479010410,49d822a98ada..c7fd466a0efd --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@@ -95,7 -95,7 +95,7 @@@ static char *cgx_tx_stats_fields[] = [CGX_STAT5] = "Total frames sent on the interface", [CGX_STAT6] = "Packets sent with an octet count < 64", [CGX_STAT7] = "Packets sent with an octet count == 64", - [CGX_STAT8] = "Packets sent with an octet count of 65–127", + [CGX_STAT8] = "Packets sent with an octet count of 65-127", [CGX_STAT9] = "Packets sent with an octet count of 128-255", [CGX_STAT10] = "Packets sent with an octet count of 256-511", [CGX_STAT11] = "Packets sent with an octet count of 512-1023", @@@ -125,7 -125,7 +125,7 @@@ static char *rpm_rx_stats_fields[] = "Total frames received on interface", "Packets received with an octet count < 64", "Packets received with an octet count == 64", - "Packets received with an octet count of 65–127", + "Packets received with an octet count of 65-127", "Packets received with an octet count of 128-255", "Packets received with an octet count of 256-511", "Packets received with an octet count of 512-1023", @@@ -164,7 -164,7 +164,7 @@@ static char *rpm_tx_stats_fields[] = "Packets sent to the multicast DMAC", "Packets sent to a broadcast DMAC", "Packets sent with an octet count == 64", - "Packets sent with an octet count of 65–127", + "Packets sent with an octet count of 65-127", "Packets sent with an octet count of 128-255", "Packets sent with an octet count of 256-511", "Packets sent with an octet count of 512-1023", @@@ -226,108 -226,85 +226,175 @@@ static const struct file_operations rvu
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16 +/* Dump LMTST map table */ +static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rvu *rvu = filp->private_data; + u64 lmt_addr, val, tbl_base; + int pf, vf, num_vfs, hw_vfs; + void __iomem *lmt_map_base; + int index = 0, off = 0; + int bytes_not_copied; + int buf_size = 10240; + char *buf; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + kfree(buf); + return false; + } + + off += scnprintf(&buf[off], buf_size - 1 - off, + "\n\t\t\t\t\tLmtst Map Table Entries"); + off += scnprintf(&buf[off], buf_size - 1 - off, + "\n\t\t\t\t\t======================="); + off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t"); + off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t"); + off += scnprintf(&buf[off], buf_size - 1 - off, + "Lmtline Base (word 0)\t\t"); + off += scnprintf(&buf[off], buf_size - 1 - off, + "Lmt Map Entry (word 1)"); + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", + pf); + + index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", + (tbl_base + index)); + lmt_addr = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%016llx\t\t", lmt_addr); + index += 8; + val = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n", + val); + /* Reading num of VFs per PF */ + rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); + for (vf = 0; vf < num_vfs; vf++) { + index = (pf * rvu->hw->total_vfs * 16) + + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); + off += scnprintf(&buf[off], buf_size - 1 - off, + "PF%d:VF%d \t\t", pf, vf); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%llx\t\t", (tbl_base + index)); + lmt_addr = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%016llx\t\t", lmt_addr); + index += 8; + val = readq(lmt_map_base + index); + off += scnprintf(&buf[off], buf_size - 1 - off, + " 0x%016llx\n", val); + } + } + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + + bytes_not_copied = copy_to_user(buffer, buf, off); + kfree(buf); + + iounmap(lmt_map_base); + if (bytes_not_copied) + return -EFAULT; + + *ppos = off; + return off; +} + +RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); + + static void get_lf_str_list(struct rvu_block block, int pcifunc, + char *lfs) + { + int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; + + for_each_set_bit(lf, block.lf.bmap, block.lf.max) { + if (lf >= block.lf.max) + break; + + if (block.fn_map[lf] != pcifunc) + continue; + + if (lf == prev_lf + 1) { + prev_lf = lf; + seq = 1; + continue; + } + + if (seq) + len += sprintf(lfs + len, "-%d,%d", prev_lf, lf); + else + len += (len ? 
sprintf(lfs + len, ",%d", lf) : + sprintf(lfs + len, "%d", lf)); + + prev_lf = lf; + seq = 0; + } + + if (seq) + len += sprintf(lfs + len, "-%d", prev_lf); + + lfs[len] = '\0'; + } + + static int get_max_column_width(struct rvu *rvu) + { + int index, pf, vf, lf_str_size = 12, buf_size = 256; + struct rvu_block block; + u16 pcifunc; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { + pcifunc = pf << 10 | vf; + if (!pcifunc) + continue; + + for (index = 0; index < BLK_COUNT; index++) { + block = rvu->hw->block[index]; + if (!strlen(block.name)) + continue; + + get_lf_str_list(block, pcifunc, buf); + if (lf_str_size <= strlen(buf)) + lf_str_size = strlen(buf) + 1; + } + } + } + + kfree(buf); + return lf_str_size; + } + /* Dumps current provisioning status of all RVU block LFs */ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - int index, off = 0, flag = 0, go_back = 0, len = 0; + int index, off = 0, flag = 0, len = 0, i = 0; struct rvu *rvu = filp->private_data; - int lf, pf, vf, pcifunc; + int bytes_not_copied = 0; struct rvu_block block; - int bytes_not_copied; - int lf_str_size = 12; + int pf, vf, pcifunc; int buf_size = 2048; + int lf_str_size; char *lfs; char *buf;
@@@ -339,6 -316,9 +406,9 @@@ if (!buf) return -ENOSPC;
+ /* Get the maximum width of a column */ + lf_str_size = get_max_column_width(rvu); + lfs = kzalloc(lf_str_size, GFP_KERNEL); if (!lfs) { kfree(buf); @@@ -352,65 -332,69 +422,69 @@@ "%-*s", lf_str_size, rvu->hw->block[index].name); } + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); + if (bytes_not_copied) + goto out; + + i++; + *ppos += off; for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { + off = 0; + flag = 0; pcifunc = pf << 10 | vf; if (!pcifunc) continue;
if (vf) { sprintf(lfs, "PF%d:VF%d", pf, vf - 1); - go_back = scnprintf(&buf[off], - buf_size - 1 - off, - "%-*s", lf_str_size, lfs); + off = scnprintf(&buf[off], + buf_size - 1 - off, + "%-*s", lf_str_size, lfs); } else { sprintf(lfs, "PF%d", pf); - go_back = scnprintf(&buf[off], - buf_size - 1 - off, - "%-*s", lf_str_size, lfs); + off = scnprintf(&buf[off], + buf_size - 1 - off, + "%-*s", lf_str_size, lfs); }
- off += go_back; - for (index = 0; index < BLKTYPE_MAX; index++) { + for (index = 0; index < BLK_COUNT; index++) { block = rvu->hw->block[index]; if (!strlen(block.name)) continue; len = 0; lfs[len] = '\0'; - for (lf = 0; lf < block.lf.max; lf++) { - if (block.fn_map[lf] != pcifunc) - continue; + get_lf_str_list(block, pcifunc, lfs); + if (strlen(lfs)) flag = 1; - len += sprintf(&lfs[len], "%d,", lf); - }
- if (flag) - len--; - lfs[len] = '\0'; off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, lfs); - if (!strlen(lfs)) - go_back += lf_str_size; } - if (!flag) - off -= go_back; - else - flag = 0; - off--; - off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + if (flag) { + off += scnprintf(&buf[off], + buf_size - 1 - off, "\n"); + bytes_not_copied = copy_to_user(buffer + + (i * off), + buf, off); + if (bytes_not_copied) + goto out; + + i++; + *ppos += off; + } } }
- bytes_not_copied = copy_to_user(buffer, buf, off); + out: kfree(lfs); kfree(buf); - if (bytes_not_copied) return -EFAULT;
- *ppos = off; - return off; + return *ppos; }
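In the LMTST dump added earlier in this file, every map-table slot is LMT_MAPTBL_ENTRY_SIZE (16) bytes: a PF's entry lives at pf * total_vfs * 16, its VFs follow at (vf + 1) * 16 past that base, and each entry's second 64-bit word sits at offset +8. A worked example of that arithmetic, assuming a hypothetical total_vfs of 64:

	#include <stdio.h>

	#define ENTRY_SIZE 16
	#define TOTAL_VFS  64 /* hypothetical; the driver reads this from hardware */

	int main(void)
	{
		int pf = 1, vf = 2;
		long pf_index = (long)pf * TOTAL_VFS * ENTRY_SIZE; /* 1024 = 0x400 */
		long vf_index = pf_index + (vf + 1) * ENTRY_SIZE;  /* 1072 = 0x430 */

		printf("PF%d entry at 0x%lx, word1 at 0x%lx\n",
		       pf, pf_index, pf_index + 8);
		printf("PF%d:VF%d entry at 0x%lx, word1 at 0x%lx\n",
		       pf, vf, vf_index, vf_index + 8);
		return 0;
	}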
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); @@@ -594,7 -578,7 +668,7 @@@ static ssize_t rvu_dbg_qsize_write(stru if (cmd_buf) ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) { + if (ret < 0 || !strncmp(subtoken, "help", 4)) { dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string); goto qsize_write_done; } @@@ -1809,6 -1793,10 +1883,10 @@@ static int rvu_dbg_nix_band_prof_ctx_di u16 pcifunc; char *str;
+ /* Ingress policers do not exist on all platforms */ + if (!nix_hw->ipolicer) + return 0; + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; @@@ -1858,6 -1846,10 +1936,10 @@@ static int rvu_dbg_nix_band_prof_rsrc_d int layer; char *str;
+ /* Ingress policers do not exist on all platforms */ + if (!nix_hw->ipolicer) + return 0; + seq_puts(m, "\nBandwidth profile resource free count\n"); seq_puts(m, "=====================================\n"); for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { @@@ -1968,7 -1960,7 +2050,7 @@@ static int cgx_print_stats(struct seq_f return -ENODEV;
mac_ops = get_mac_ops(cgxd); - + /* There can be no CGX devices at all */ if (!mac_ops) return 0;
@@@ -2046,13 -2038,13 +2128,13 @@@ if (err) return err;
- if (is_rvu_otx2(rvu)) - seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], - tx_stat); - else - seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], - tx_stat); - stat++; + if (is_rvu_otx2(rvu)) + seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], + tx_stat); + else + seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], + tx_stat); + stat++; }
return err; @@@ -2490,8 -2482,6 +2572,8 @@@ static int rvu_dbg_npc_mcam_show_rules( seq_printf(s, "VF%d", vf); } seq_puts(s, "\n"); + seq_printf(s, "\tchannel: 0x%x\n", iter->chan); + seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask); }
rvu_dbg_npc_mcam_show_action(s, iter); @@@ -2764,10 -2754,6 +2846,10 @@@ void rvu_dbg_init(struct rvu *rvu debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_rsrc_status_fops);
+ if (!is_rvu_otx2(rvu)) + debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, + rvu, &rvu_dbg_lmtst_map_table_fops); + if (!cgx_get_cgxcnt_max()) goto create;
diff --combined drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 7761dcf17b91,6970540dc470..d8b1948aaa0a --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@@ -28,7 -28,6 +28,7 @@@ static int nix_verify_bandprof(struct n static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); +static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz { MC_TBL_SZ_256, @@@ -1062,68 -1061,10 +1062,68 @@@ static int rvu_nix_blk_aq_enq_inst(stru return 0; }
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, u8 ctype) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + int rc, word; + + if (req->ctype != NIX_AQ_CTYPE_CQ) + return 0; + + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, + req->hdr.pcifunc, ctype, req->qidx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", + __func__, nix_get_ctx_name(ctype), req->qidx, + req->hdr.pcifunc); + return rc; + } + + /* Make copy of original context & mask which are required + * for resubmission + */ + memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + + /* exclude fields which HW can update */ + aq_req.cq_mask.cq_err = 0; + aq_req.cq_mask.wrptr = 0; + aq_req.cq_mask.tail = 0; + aq_req.cq_mask.head = 0; + aq_req.cq_mask.avg_level = 0; + aq_req.cq_mask.update_time = 0; + aq_req.cq_mask.substream = 0; + + /* Context mask (cq_mask) holds mask value of fields which + * are changed in AQ WRITE operation. + * for example cq.drop = 0xa; + * cq_mask.drop = 0xff; + * Below logic performs '&' between cq and cq_mask so that non + * updated fields are masked out for request and response + * comparison + */ + for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + word++) { + *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + *(u64 *)((u8 *)&aq_req.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + } + + if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; + + return 0; +} + static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; + int err, retries = 5; int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); @@@ -1134,24 -1075,7 +1134,24 @@@ if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK;
- return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); +retry: + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); + + /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' + * As a work around perfrom CQ context read after each AQ write. If AQ + * read shows AQ write is not updated perform AQ write again. + */ + if (!err && req->op == NIX_AQ_INSTOP_WRITE) { + err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); + if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { + if (retries--) + goto retry; + else + return NIX_AF_ERR_CQ_CTX_WRITE_ERR; + } + } + + return err; }
static const char *nix_get_ctx_name(int ctype) @@@ -2583,6 -2507,9 +2583,9 @@@ static void nix_free_tx_vtag_entries(st return;
nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock); @@@ -4512,17 -4439,10 +4515,17 @@@ int rvu_mbox_handler_nix_lf_stop_rx(str return rvu_cgx_start_stop_io(rvu, pcifunc, false); }
+#define RX_SA_BASE GENMASK_ULL(52, 7) + void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + u64 sa_base; + void *cgxd; int err;
ctx_req.hdr.pcifunc = pcifunc; @@@ -4559,33 -4479,9 +4562,33 @@@ dev_err(rvu->dev, "CQ ctx disable failed\n"); }
+ /* reset HW config done for Switch headers */ + rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, + (PKIND_TX | PKIND_RX), 0, 0, 0, 0); + + /* Disabling CGX and NPC config done for PTP */ + if (pfvf->hw_rx_tstamp_en) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); + /* Undo NPC config done for PTP */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) + dev_err(rvu->dev, "NPC config for PTP failed\n"); + pfvf->hw_rx_tstamp_en = false; + } + nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc); + + sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); + if (FIELD_GET(RX_SA_BASE, sa_base)) { + err = rvu_cpt_ctx_flush(rvu, pcifunc); + if (err) + dev_err(rvu->dev, + "CPT ctx flush failed with error: %d\n", err); + } }
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) @@@ -4686,119 -4582,6 +4689,119 @@@ int rvu_mbox_handler_nix_lso_format_cfg return 0; }
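The inline-IPsec hunk that follows packs the engine group, opcode and parameters into a single 64-bit register using GENMASK_ULL() and FIELD_PREP(). A minimal kernel-style sketch of that bitfield idiom with a made-up register layout (the MY_CFG_* names are illustrative, mirroring the IPSEC_GEN_CFG_* style):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* hypothetical register layout */
	#define MY_CFG_EGRP   GENMASK_ULL(50, 48)
	#define MY_CFG_OPCODE GENMASK_ULL(47, 32)
	#define MY_CFG_PARAM1 GENMASK_ULL(31, 16)
	#define MY_CFG_PARAM2 GENMASK_ULL(15, 0)

	static u64 my_pack_cfg(u8 egrp, u16 opcode, u16 param1, u16 param2)
	{
		u64 val = 0;

		/* FIELD_PREP shifts each value into its mask's position */
		val |= FIELD_PREP(MY_CFG_EGRP, egrp);
		val |= FIELD_PREP(MY_CFG_OPCODE, opcode);
		val |= FIELD_PREP(MY_CFG_PARAM1, param1);
		val |= FIELD_PREP(MY_CFG_PARAM2, param2);

		return val;
	}

	static u16 my_unpack_opcode(u64 val)
	{
		/* FIELD_GET is the inverse: mask, then shift back down */
		return FIELD_GET(MY_CFG_OPCODE, val);
	}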
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) +#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) +#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) +#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) + +#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) +#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) +#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) + +static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, + int blkaddr) +{ + u8 cpt_idx, cpt_blkaddr; + u64 val; + + cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; + if (req->enable) { + val = 0; + /* Enable context prefetching */ + if (!is_rvu_otx2(rvu)) + val |= BIT_ULL(51); + + /* Set OPCODE and EGRP */ + val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); + val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); + + /* Set CPT queue for inline IPSec */ + val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); + val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, + req->inst_qsel.cpt_pf_func); + + if (!is_rvu_otx2(rvu)) { + cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : + BLKADDR_CPT1; + val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); + } + + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + val); + + /* Set CPT credit */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + req->cpt_credit); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF); + } +} + +int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, + struct nix_inline_ipsec_cfg *req, + struct msg_rsp *rsp) +{ + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); + if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); + + return 0; +} + +int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, + struct nix_inline_ipsec_lf_cfg *req, + struct msg_rsp *rsp) +{ + int lf, blkaddr, err; + u64 val; + + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); + if (err) + return err; + + if (req->enable) { + /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ + val = (u64)req->ipsec_cfg0.tt << 44 | + (u64)req->ipsec_cfg0.tag_const << 20 | + (u64)req->ipsec_cfg0.sa_pow2_size << 16 | + req->ipsec_cfg0.lenm1_max; + + if (blkaddr == BLKADDR_NIX1) + val |= BIT_ULL(46); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); + + /* Set SA_IDX_W and SA_IDX_MAX */ + val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | + req->ipsec_cfg1.sa_idx_max; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); + + /* Set SA base address */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + req->sa_base_addr); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + 0x0); + } + + return 0; +} void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) { bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); diff --combined drivers/net/ethernet/microchip/lan743x_main.c index 03d02403c19e,4d5a5d6595b3..4fc97823bc84 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ 
b/drivers/net/ethernet/microchip/lan743x_main.c @@@ -816,7 -816,7 +816,7 @@@ static int lan743x_mac_init(struct lan7 eth_random_addr(adapter->mac_address); } lan743x_mac_set_address(adapter, adapter->mac_address); - ether_addr_copy(netdev->dev_addr, adapter->mac_address); + eth_hw_addr_set(netdev, adapter->mac_address);
return 0; } @@@ -1743,6 -1743,16 +1743,16 @@@ static int lan743x_tx_ring_init(struct ret = -EINVAL; goto cleanup; } + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(32))) { + dev_warn(&tx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + } ring_allocation_size = ALIGN(tx->ring_size * sizeof(struct lan743x_tx_descriptor), PAGE_SIZE); @@@ -1934,7 -1944,8 +1944,8 @@@ static void lan743x_rx_update_tail(stru index); }
- static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index) + static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, + gfp_t gfp) { struct net_device *netdev = rx->adapter->netdev; struct device *dev = &rx->adapter->pdev->dev; @@@ -1948,7 -1959,7 +1959,7 @@@
descriptor = &rx->ring_cpu_ptr[index]; buffer_info = &rx->buffer_info[index]; - skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA); + skb = __netdev_alloc_skb(netdev, buffer_length, gfp); if (!skb) return -ENOMEM; dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE); @@@ -2110,7 -2121,8 +2121,8 @@@ static int lan743x_rx_process_buffer(st
/* save existing skb, allocate new skb and map to dma */ skb = buffer_info->skb; - if (lan743x_rx_init_ring_element(rx, rx->last_head)) { + if (lan743x_rx_init_ring_element(rx, rx->last_head, + GFP_ATOMIC | GFP_DMA)) { /* failed to allocate next skb. * Memory is very low. * Drop this packet and reuse buffer. @@@ -2276,6 -2288,16 +2288,16 @@@ static int lan743x_rx_ring_init(struct ret = -EINVAL; goto cleanup; } + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(32))) { + dev_warn(&rx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + } ring_allocation_size = ALIGN(rx->ring_size * sizeof(struct lan743x_rx_descriptor), PAGE_SIZE); @@@ -2315,13 -2337,16 +2337,16 @@@
rx->last_head = 0; for (index = 0; index < rx->ring_size; index++) { - ret = lan743x_rx_init_ring_element(rx, index); + ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL); if (ret) goto cleanup; } return 0;
cleanup: + netif_warn(rx->adapter, ifup, rx->adapter->netdev, + "Error allocating memory for LAN743x\n"); + lan743x_rx_ring_cleanup(rx); return ret; } @@@ -2645,7 -2670,7 +2670,7 @@@ static int lan743x_netdev_set_mac_addre ret = eth_prepare_mac_addr_change(netdev, sock_addr); if (ret) return ret; - ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + eth_hw_addr_set(netdev, sock_addr->sa_data); lan743x_mac_set_address(adapter, sock_addr->sa_data); lan743x_rfe_update_mac_address(adapter); return 0; @@@ -3019,6 -3044,8 +3044,8 @@@ static int lan743x_pm_resume(struct dev if (ret) { netif_err(adapter, probe, adapter->netdev, "lan743x_hardware_init returned %d\n", ret); + lan743x_pci_cleanup(adapter); + return ret; }
/* open netdev when netdev is at running state while resume. diff --combined drivers/net/ethernet/nxp/lpc_eth.c index a63cc295b979,c910fa2f40a4..bc39558fe82b --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@@ -419,7 -419,7 +419,7 @@@ struct netdata_local /* * MAC support functions */ -static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) +static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac) { u32 tmp;
@@@ -1015,9 -1015,6 +1015,6 @@@ static int lpc_eth_close(struct net_dev napi_disable(&pldat->napi); netif_stop_queue(ndev);
- if (ndev->phydev) - phy_stop(ndev->phydev); - spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); netif_carrier_off(ndev); @@@ -1025,6 -1022,8 +1022,8 @@@ writel(0, LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev) + phy_stop(ndev->phydev); clk_disable_unprepare(pldat->clk);
return 0; @@@ -1093,7 -1092,7 +1092,7 @@@ static int lpc_set_mac_address(struct n
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@@ -1232,7 -1231,6 +1231,7 @@@ static int lpc_eth_drv_probe(struct pla struct net_device *ndev; dma_addr_t dma_handle; struct resource *res; + u8 addr[ETH_ALEN]; int irq, ret;
/* Setup network interface for RMII or MII mode */ @@@ -1348,11 -1346,10 +1347,11 @@@ pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */ - __lpc_get_mac(pldat, ndev->dev_addr); + __lpc_get_mac(pldat, addr); + eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) { - of_get_mac_address(np, ndev->dev_addr); + of_get_ethdev_address(np, ndev); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --combined drivers/net/ethernet/realtek/r8169_main.c index ee6c9c842012,2918947dd57c..bbe21db20417 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@@ -118,6 -118,7 +118,6 @@@ static const struct [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, - [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, @@@ -156,6 -157,7 +156,7 @@@ static const struct pci_device_id rtl81 { PCI_VDEVICE(REALTEK, 0x8129) }, { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, { PCI_VDEVICE(REALTEK, 0x8167) }, { PCI_VDEVICE(REALTEK, 0x8168) }, { PCI_VDEVICE(NCUBE, 0x8168) }, @@@ -984,6 -986,33 +985,6 @@@ DECLARE_RTL_COND(rtl_ocpar_cond return RTL_R32(tp, OCPAR) & OCPAR_FLAG; }
-static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) -{ - RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); -} - -static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - r8168dp_1_mdio_access(tp, reg, - OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); -} - -static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) -{ - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); - - mdelay(1); - RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? - RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; -} - #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
static void r8168dp_2_mdio_start(struct rtl8169_private *tp) @@@ -1025,6 -1054,9 +1026,6 @@@ static int r8168dp_2_mdio_read(struct r static void rtl_writephy(struct rtl8169_private *tp, int location, int val) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - r8168dp_1_mdio_write(tp, location, val); - break; case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); @@@ -1041,6 -1073,8 +1042,6 @@@ static int rtl_readphy(struct rtl8169_private *tp, int location) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - return r8168dp_1_mdio_read(tp, location); case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); @@@ -1202,6 -1236,7 +1203,6 @@@ static bool r8168ep_check_dash(struct r static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; @@@ -2006,7 -2041,8 +2007,7 @@@ static enum mac_version rtl8169_get_mac
/* 8168DP family. */ /* It seems this early RTL8168dp version never made it to - * the wild. Let's see whether somebody complains, if not - * we'll remove support for this chip version completely. + * the wild. Support has been removed. * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, */ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, @@@ -2336,7 -2372,7 +2337,7 @@@ static void rtl_jumbo_config(struct rtl r8168c_hw_jumbo_disable(tp); } break; - case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_28: if (jumbo) r8168dp_hw_jumbo_enable(tp); else @@@ -3684,6 -3720,7 +3685,6 @@@ static void rtl_hw_config(struct rtl816 [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, - [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, @@@ -3946,6 -3983,7 +3947,6 @@@ static void rtl8169_cleanup(struct rtl8 goto no_reset;
switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); @@@ -5217,7 -5255,7 +5218,7 @@@ static int rtl_get_ether_clk(struct rtl static void rtl_init_mac_address(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; - u8 *mac_addr = dev->dev_addr; + u8 mac_addr[ETH_ALEN]; int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); @@@ -5235,7 -5273,6 +5236,7 @@@ eth_hw_addr_random(dev); dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); done: + eth_hw_addr_set(dev, mac_addr); rtl_rar_set(tp, mac_addr); }
diff --combined drivers/net/usb/lan78xx.c index 03319fdb5235,63cd72c5f580..f20376c1ef3f --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@@ -1817,7 -1817,7 +1817,7 @@@ static void lan78xx_init_mac_address(st lan78xx_write_reg(dev, MAF_LO(0), addr_lo); lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr); + eth_hw_addr_set(dev->net, addr); }
/* MDIO read and write wrappers for phylib */ @@@ -2416,7 -2416,7 +2416,7 @@@ static int lan78xx_set_mac_addr(struct if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] | netdev->dev_addr[1] << 8 | @@@ -4122,6 -4122,12 +4122,12 @@@ static int lan78xx_probe(struct usb_int
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */ + if (dev->maxpacket == 0) { + ret = -ENODEV; + goto out4; + } + /* driver requires remote-wakeup capability during autosuspend. */ intf->needs_remote_wakeup = 1;
diff --combined drivers/net/usb/usbnet.c index 350bae673ed4,a33d7fb82a00..9a6450f796dc --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@@ -165,13 -165,12 +165,13 @@@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints)
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) { + u8 addr[ETH_ALEN]; int tmp = -1, ret; unsigned char buf [13];
ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf); if (ret == 12) - tmp = hex2bin(dev->net->dev_addr, buf, 6); + tmp = hex2bin(addr, buf, 6); if (tmp < 0) { dev_dbg(&dev->udev->dev, "bad MAC string %d fetch, %d\n", iMACAddress, tmp); @@@ -179,7 -178,6 +179,7 @@@ ret = -EINVAL; return ret; } + eth_hw_addr_set(dev->net, addr); return 0; } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); @@@ -1728,7 -1726,7 +1728,7 @@@ usbnet_probe (struct usb_interface *ude
dev->net = net; strscpy(net->name, "usb%d", sizeof(net->name)); - memcpy (net->dev_addr, node_id, sizeof node_id); + eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. @@@ -1792,6 -1790,7 +1792,7 @@@ dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); if (dev->maxpacket == 0) { /* that is a broken device */ + status = -ENODEV; goto out4; }
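usbnet_get_ethernet_addr() above now decodes the 12-character USB string into a local buffer with hex2bin() and commits it through eth_hw_addr_set() only on success, instead of writing into dev_addr directly. A plain-C illustration of the decode step, using a small stand-in for hex2bin():

	#include <stdio.h>

	/* minimal stand-in for the kernel's hex2bin(): returns 0 on
	 * success, -1 on a malformed hex pair */
	static int my_hex2bin(unsigned char *dst, const char *src, int n)
	{
		while (n--) {
			unsigned int byte;

			if (sscanf(src, "%2x", &byte) != 1)
				return -1;
			*dst++ = (unsigned char)byte;
			src += 2;
		}
		return 0;
	}

	int main(void)
	{
		unsigned char addr[6];

		if (my_hex2bin(addr, "0123456789ab", 6))
			return 1;
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
		return 0;
	}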
diff --combined drivers/net/vmxnet3/vmxnet3_drv.c index 3e1b7746cce4,8799854bacb2..14fae317bc70 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@@ -46,7 -46,7 +46,7 @@@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_ static int enable_mq = 1;
static void -vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
/* * Enable/Disable the given intr @@@ -2806,7 -2806,7 +2806,7 @@@ vmxnet3_quiesce_dev(struct vmxnet3_adap
static void -vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac) { u32 tmp;
@@@ -2824,7 -2824,7 +2824,7 @@@ vmxnet3_set_mac_addr(struct net_device struct sockaddr *addr = p; struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + dev_addr_set(netdev, addr->sa_data); vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0; @@@ -3638,7 -3638,7 +3638,7 @@@ vmxnet3_probe_device(struct pci_dev *pd #endif
vmxnet3_read_mac_addr(adapter, mac); - memcpy(netdev->dev_addr, mac, netdev->addr_len); + dev_addr_set(netdev, mac);
netdev->netdev_ops = &vmxnet3_netdev_ops; vmxnet3_set_ethtool_ops(netdev); @@@ -3833,7 -3833,6 +3833,6 @@@ vmxnet3_suspend(struct device *device vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev); - netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */ pmConf = adapter->pm_conf; diff --combined drivers/net/xen-netfront.c index 57437e4b8a94,fc41ba95f81d..911f43986a8c --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@@ -1730,6 -1730,10 +1730,10 @@@ static int netfront_resume(struct xenbu
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev); + netif_device_detach(info->netdev); + netif_tx_unlock_bh(info->netdev); + xennet_disconnect_backend(info); return 0; } @@@ -2157,7 -2161,6 +2161,7 @@@ static int talk_to_netback(struct xenbu unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; + u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@@ -2171,12 -2174,11 +2175,12 @@@ "feature-split-event-channels", 0);
/* Read mac addr. */ - err = xen_net_read_mac(dev, info->netdev->dev_addr); + err = xen_net_read_mac(dev, addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out_unlocked; } + eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, "feature-xdp-headroom", 0); @@@ -2351,6 -2353,10 +2355,10 @@@ static int xennet_connect(struct net_de * domain a kick because we've probably just requeued some * packets. */ + netif_tx_lock_bh(np->netdev); + netif_device_attach(np->netdev); + netif_tx_unlock_bh(np->netdev); + netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; diff --combined include/linux/bpf.h index 1c7fd7c4c6d3,3db6f6c95489..e6f5579f9356 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@@ -48,7 -48,6 +48,7 @@@ extern struct idr btf_idr extern spinlock_t btf_idr_lock; extern struct kobject *btf_kobj;
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); @@@ -143,8 -142,7 +143,8 @@@ struct bpf_map_ops int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee); - int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, + int (*map_for_each_callback)(struct bpf_map *map, + bpf_callback_t callback_fn, void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */ @@@ -931,8 -929,11 +931,11 @@@ struct bpf_array_aux * stored in the map to make sure that all callers and callees have * the same prog type and JITed flag. */ - enum bpf_prog_type type; - bool jited; + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + } owner; /* Programs with direct jumps into programs part of this array. */ struct list_head poke_progs; struct bpf_map *map; @@@ -1091,7 -1092,6 +1094,7 @@@ bool bpf_prog_array_compatible(struct b int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); +const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); @@@ -2220,8 -2220,6 +2223,8 @@@ int bpf_arch_text_poke(void *ip, enum b struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+#define MAX_BPRINTF_VARARGS 12 + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 **bin_buf, u32 num_args); void bpf_bprintf_cleanup(void); diff --combined include/linux/filter.h index 47f80adbe744,ef03ff34234d..8231a6a257f6 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -360,9 -360,10 +360,9 @@@ static inline bool insn_is_zext(const s .off = 0, \ .imm = TGT })
-/* Function call */ +/* Convert function address to BPF immediate */
-#define BPF_CAST_CALL(x) \ - ((u64 (*)(u64, u64, u64, u64, u64))(x)) +#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ @@@ -370,7 -371,7 +370,7 @@@ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ - .imm = ((FUNC) - __bpf_call_base) }) + .imm = BPF_CALL_IMM(FUNC) })
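BPF encodes kernel helper calls as a 32-bit immediate holding the target's offset from __bpf_call_base, and BPF_CALL_IMM() now names that conversion instead of the old BPF_CAST_CALL() double cast; together with the bpf_callback_t typedef earlier, callers such as map_for_each_callback() get a real function type. Roughly, the round trip looks like the fragment below (insn and r1..r5 come from the surrounding interpreter context; the void-pointer arithmetic on a function address is a GCC extension the kernel relies on):

/* Encode: at verify/patch time, store the helper's offset from
 * __bpf_call_base in the instruction immediate.
 */
insn->imm = BPF_CALL_IMM(bpf_map_lookup_elem);

/* Decode: the interpreter/JIT recovers a callable address by adding
 * the base back; bpf_callback_t removes the ad-hoc cast.
 */
bpf_callback_t fn = (bpf_callback_t)((void *)__bpf_call_base + insn->imm);
u64 ret = fn(r1, r2, r3, r4, r5);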
/* Raw code statement block */
@@@ -1050,6 -1051,7 +1050,7 @@@ extern int bpf_jit_enable extern int bpf_jit_harden; extern int bpf_jit_kallsyms; extern long bpf_jit_limit; + extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --combined include/net/cfg80211.h index 7c9d5db4f0e6,27336fc70467..423f97b982ff --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@@ -739,22 -739,6 +739,22 @@@ struct cfg80211_tid_config struct cfg80211_tid_cfg tid_conf[]; };
+/** + * struct cfg80211_fils_aad - FILS AAD data + * @macaddr: STA MAC address + * @kek: FILS KEK + * @kek_len: FILS KEK length + * @snonce: STA Nonce + * @anonce: AP Nonce + */ +struct cfg80211_fils_aad { + const u8 *macaddr; + const u8 *kek; + u8 kek_len; + const u8 *snonce; + const u8 *anonce; +}; + /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition @@@ -1056,36 -1040,6 +1056,36 @@@ struct cfg80211_crypto_settings enum nl80211_sae_pwe_mechanism sae_pwe; };
+/** + * struct cfg80211_mbssid_config - AP settings for multi bssid + * + * @tx_wdev: pointer to the transmitted interface in the MBSSID set + * @index: index of this AP in the multi bssid group. + * @ema: set to true if the beacons should be sent out in EMA mode. + */ +struct cfg80211_mbssid_config { + struct wireless_dev *tx_wdev; + u8 index; + bool ema; +}; + +/** + * struct cfg80211_mbssid_elems - Multiple BSSID elements + * + * @cnt: Number of elements in array %elems. + * + * @elem: Array of multiple BSSID element(s) to be added into Beacon frames. + * @elem.data: Data for multiple BSSID elements. + * @elem.len: Length of data. + */ +struct cfg80211_mbssid_elems { + u8 cnt; + struct { + const u8 *data; + size_t len; + } elem[]; +}; + /** * struct cfg80211_beacon_data - beacon data * @head: head portion of beacon (before TIM IE) @@@ -1104,7 -1058,6 +1104,7 @@@ * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) + * @mbssid_ies: multiple BSSID elements * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) * @lci: Measurement Report element content, starting with Measurement Token @@@ -1122,7 -1075,6 +1122,7 @@@ struct cfg80211_beacon_data const u8 *probe_resp; const u8 *lci; const u8 *civicloc; + struct cfg80211_mbssid_elems *mbssid_ies; s8 ftm_responder;
size_t head_len, tail_len; @@@ -1237,7 -1189,6 +1237,7 @@@ enum cfg80211_ap_settings_flags * @he_oper: HE operation IE (or %NULL if HE isn't enabled) * @fils_discovery: FILS discovery transmission parameters * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters + * @mbssid_config: AP settings for multiple bssid */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@@ -1270,7 -1221,6 +1270,7 @@@ struct cfg80211_he_bss_color he_bss_color; struct cfg80211_fils_discovery fils_discovery; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; + struct cfg80211_mbssid_config mbssid_config; };
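The two new structures split MBSSID support into a per-AP configuration (which interface in the set transmits, this BSS's index, EMA on or off) and the raw Multiple BSSID element(s) carried in the beacon. A hypothetical sketch of a caller filling both while assembling cfg80211_ap_settings; ies/ies_len and tx_wdev are assumed inputs, and real callers receive the elements from userspace via nl80211:

struct cfg80211_mbssid_elems *elems;

/* One prebuilt Multiple BSSID element for illustration. */
elems = kzalloc(struct_size(elems, elem, 1), GFP_KERNEL);
if (!elems)
	return -ENOMEM;
elems->cnt = 1;
elems->elem[0].data = ies;
elems->elem[0].len = ies_len;

params->beacon.mbssid_ies = elems;		/* struct cfg80211_beacon_data */
params->mbssid_config.tx_wdev = tx_wdev;	/* transmitting interface */
params->mbssid_config.index = 1;		/* position in the MBSSID set */
params->mbssid_config.ema = false;		/* plain MBSSID, not EMA */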
/** @@@ -4068,10 -4018,6 +4068,10 @@@ struct mgmt_frame_regs * @set_sar_specs: Update the SAR (TX power) settings. * * @color_change: Initiate a color change. + * + * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use + * those to decrypt (Re)Association Request and encrypt (Re)Association + * Response frame. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@@ -4402,8 -4348,6 +4402,8 @@@ int (*color_change)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_color_change_settings *params); + int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_fils_aad *fils_aad); };
/* @@@ -5037,13 -4981,6 +5037,13 @@@ struct wiphy_iftype_akm_suites * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes * @sar_capa: SAR control capabilities * @rfkill: a pointer to the rfkill structure + * + * @mbssid_max_interfaces: maximum number of interfaces supported by the driver + * in a multiple BSSID set. This field must be set to a non-zero value + * by the driver to advertise MBSSID support. + * @ema_max_profile_periodicity: maximum profile periodicity supported by + * the driver. Setting this field to a non-zero value indicates that the + * driver supports enhanced multi-BSSID advertisements (EMA AP). */ struct wiphy { struct mutex mtx; @@@ -5188,9 -5125,6 +5188,9 @@@
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces; + u8 ema_max_profile_periodicity; + char priv[] __aligned(NETDEV_ALIGN); };
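Driver-side support stays opt-in: both new wiphy fields default to zero, and a driver advertises MBSSID (and optionally EMA) by setting them before registration. A hypothetical probe-time sketch with made-up limits:

static int example_wiphy_setup(struct wiphy *wiphy)
{
	/* Example values only; real drivers report what their firmware
	 * supports. Zero in either field leaves the feature off.
	 */
	wiphy->mbssid_max_interfaces = 8;	/* BSSes per MBSSID set */
	wiphy->ema_max_profile_periodicity = 3;	/* non-zero => EMA AP */

	return wiphy_register(wiphy);
}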
@@@ -5442,7 -5376,6 +5442,6 @@@ static inline void wiphy_unlock(struct * netdev and may otherwise be used by driver read-only, will be update * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames - * @mgmt_registrations_lock: lock for the list * @mgmt_registrations_need_update: mgmt registrations were updated, * need to propagate the update to the driver * @mtx: mutex used to lock data in this struct, may be used by drivers @@@ -5489,7 -5422,6 +5488,6 @@@ struct wireless_dev u32 identifier;
struct list_head mgmt_registrations; - spinlock_t mgmt_registrations_lock; u8 mgmt_registrations_need_update:1;
struct mutex mtx; @@@ -5558,7 -5490,7 +5556,7 @@@ unsigned long unprot_beacon_reported; };
-static inline u8 *wdev_address(struct wireless_dev *wdev) +static inline const u8 *wdev_address(struct wireless_dev *wdev) { if (wdev->netdev) return wdev->netdev->dev_addr; @@@ -6376,17 -6308,6 +6374,17 @@@ static inline void cfg80211_gen_new_bss u64_to_ether_addr(new_bssid_u64, new_bssid); }
+/** + * cfg80211_get_ies_channel_number - returns the channel number from ies + * @ie: IEs + * @ielen: length of IEs + * @band: enum nl80211_band of the channel + * + * Returns the channel number, or -1 if none could be determined. + */ +int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, + enum nl80211_band band); + /** * cfg80211_is_element_inherited - returns if element ID should be inherited * @element: element to check diff --combined include/net/mptcp.h index f83fa48408b3,3214848402ec..a925349b4b89 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@@ -12,8 -12,6 +12,8 @@@ #include <linux/tcp.h> #include <linux/types.h>
+struct mptcp_info; +struct mptcp_sock; struct seq_file;
/* MPTCP sk_buff extension data */ @@@ -71,6 -69,10 +71,10 @@@ struct mptcp_out_options struct { u64 sndr_key; u64 rcvr_key; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + __sum16 csum; }; struct { struct mptcp_addr_info addr; @@@ -123,8 -125,6 +127,8 @@@ bool mptcp_incoming_options(struct soc void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts);
+void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info); + /* move the skb extension owership, with the assumption that 'to' is * newly allocated */ diff --combined include/net/sock.h index fb70d8553fae,463f390d90b3..620de053002d --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -259,11 -259,10 +259,11 @@@ struct bpf_local_storage * @sk_rcvbuf: size of receive buffer in bytes * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux + * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst + * @sk_rx_dst_cookie: cookie for @sk_rx_dst * @sk_dst_cache: destination cache * @sk_dst_pending_confirm: need to confirm neighbour * @sk_policy: flow policy - * @sk_rx_skb_cache: cache copy of recently accessed RX skb * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_tsq_flags: TCP Small Queues flags @@@ -271,7 -270,6 +271,7 @@@ * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward + * @sk_reserved_mem: space reserved and non-reclaimable for the socket * @sk_napi_id: id of the last napi context to receive data for sk * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode @@@ -331,6 -329,7 +331,6 @@@ * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head] - * @sk_tx_skb_cache: cache copy of recently accessed TX skb * @sk_security: used by security modules * @sk_mark: generic packet mark * @sk_cgrp_data: cgroup data for this cgroup @@@ -395,6 -394,7 +395,6 @@@ struct sock atomic_t sk_drops; int sk_rcvlowat; struct sk_buff_head sk_error_queue; - struct sk_buff *sk_rx_skb_cache; struct sk_buff_head sk_receive_queue; /* * The backlog queue is special, it is always used with @@@ -413,7 -413,6 +413,7 @@@ #define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc; + u32 sk_reserved_mem; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_ll_usec; /* ===== mostly read cache line ===== */ @@@ -432,9 -431,6 +432,9 @@@ struct xfrm_policy __rcu *sk_policy[2]; #endif struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; @@@ -447,6 -443,7 +447,6 @@@ struct sk_buff *sk_send_head; struct rb_root tcp_rtx_queue; }; - struct sk_buff *sk_tx_skb_cache; struct sk_buff_head sk_write_queue; __s32 sk_peek_off; int sk_write_pending; @@@ -1210,16 -1207,13 +1210,16 @@@ struct proto unsigned int inuse_idx; #endif
+ int (*forward_alloc_get)(const struct sock *sk); + bool (*stream_memory_free)(const struct sock *sk, int wake); - bool (*stream_memory_read)(const struct sock *sk); + bool (*sock_is_readable)(struct sock *sk); /* Memory pressure */ void (*enter_memory_pressure)(struct sock *sk); void (*leave_memory_pressure)(struct sock *sk); atomic_long_t *memory_allocated; /* Current allocated memory. */ struct percpu_counter *sockets_allocated; /* Current number of sockets. */ + /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. @@@ -1243,7 -1237,7 +1243,7 @@@ unsigned int useroffset; /* Usercopy region offset */ unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count; + unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; @@@ -1297,22 -1291,20 +1297,22 @@@ static inline void sk_refcnt_debug_rele
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
+static inline int sk_forward_alloc_get(const struct sock *sk) +{ + if (!sk->sk_prot->forward_alloc_get) + return sk->sk_forward_alloc; + + return sk->sk_prot->forward_alloc_get(sk); +} + static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) { if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) return false;
-#ifdef CONFIG_INET - return sk->sk_prot->stream_memory_free ? - INDIRECT_CALL_1(sk->sk_prot->stream_memory_free, - tcp_stream_memory_free, - sk, wake) : true; -#else return sk->sk_prot->stream_memory_free ? - sk->sk_prot->stream_memory_free(sk, wake) : true; -#endif + INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free, + tcp_stream_memory_free, sk, wake) : true; }
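INDIRECT_CALL_INET_1() folds the old #ifdef CONFIG_INET split into one expression: with retpolines enabled it compares the pointer against the likely target and calls it directly on a match; without CONFIG_INET it degenerates to the plain indirect call. Expanded by hand, the call above behaves roughly like:

/* Rough manual expansion; the real macros live in
 * <linux/indirect_call_wrapper.h>.
 */
bool ret;

if (likely(sk->sk_prot->stream_memory_free == tcp_stream_memory_free))
	ret = tcp_stream_memory_free(sk, wake);	/* direct call, no retpoline */
else
	ret = sk->sk_prot->stream_memory_free(sk, wake);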
static inline bool sk_stream_memory_free(const struct sock *sk) @@@ -1526,49 -1518,20 +1526,49 @@@ sk_rmem_schedule(struct sock *sk, struc skb_pfmemalloc(skb); }
+static inline int sk_unused_reserved_mem(const struct sock *sk) +{ + int unused_mem; + + if (likely(!sk->sk_reserved_mem)) + return 0; + + unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued - + atomic_read(&sk->sk_rmem_alloc); + + return unused_mem > 0 ? unused_mem : 0; +} + static inline void sk_mem_reclaim(struct sock *sk) { + int reclaimable; + if (!sk_has_account(sk)) return; - if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, sk->sk_forward_alloc); + + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); + + if (reclaimable >= SK_MEM_QUANTUM) + __sk_mem_reclaim(sk, reclaimable); +} + +static inline void sk_mem_reclaim_final(struct sock *sk) +{ + sk->sk_reserved_mem = 0; + sk_mem_reclaim(sk); }
static inline void sk_mem_reclaim_partial(struct sock *sk) { + int reclaimable; + if (!sk_has_account(sk)) return; - if (sk->sk_forward_alloc > SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); + + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); + + if (reclaimable > SK_MEM_QUANTUM) + __sk_mem_reclaim(sk, reclaimable - 1); }
static inline void sk_mem_charge(struct sock *sk, int size) @@@ -1578,19 -1541,11 +1578,19 @@@ sk->sk_forward_alloc -= size; }
+/* the following macros control memory reclaiming in sk_mem_uncharge() + */ +#define SK_RECLAIM_THRESHOLD (1 << 21) +#define SK_RECLAIM_CHUNK (1 << 20) + static inline void sk_mem_uncharge(struct sock *sk, int size) { + int reclaimable; + if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow. * TCP send queues can make this happen, if sk_mem_reclaim() @@@ -1599,14 -1554,22 +1599,14 @@@ * If we reach 2 MBytes, reclaim 1 MBytes right now, there is * no need to hold that much forward allocation anyway. */ - if (unlikely(sk->sk_forward_alloc >= 1 << 21)) - __sk_mem_reclaim(sk, 1 << 20); + if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD)) + __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK); }
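sk_reserved_mem changes what the reclaim helpers consider returnable: the unused slice of the reservation is subtracted from sk_forward_alloc first, so reserved-but-idle memory never flows back to the global accounting. A worked sketch of the arithmetic, with invented numbers:

/* Illustrative values only. */
u32 reserved  = 1 << 20;	/* sk_reserved_mem:  1 MB reserved   */
int queued    = 256 << 10;	/* sk_wmem_queued:   256 KB queued   */
int rmem      = 0;		/* sk_rmem_alloc:    empty rx queue  */
int fwd_alloc = 5 << 19;	/* sk_forward_alloc: 2.5 MB          */

int unused      = reserved - queued - rmem;	/* 768 KB still held */
int reclaimable = fwd_alloc - unused;		/* 1.75 MB           */

/* sk_mem_uncharge() compares 'reclaimable', not sk_forward_alloc,
 * against SK_RECLAIM_THRESHOLD (2 MB): 1.75 MB < 2 MB means no
 * reclaim here, even though sk_forward_alloc alone exceeds 2 MB.
 */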
-DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { sk_wmem_queued_add(sk, -skb->truesize); sk_mem_uncharge(sk, skb->truesize); - if (static_branch_unlikely(&tcp_tx_skb_cache_key) && - !sk->sk_tx_skb_cache && !skb_cloned(skb)) { - skb_ext_reset(skb); - skb_zcopy_clear(skb, true); - sk->sk_tx_skb_cache = skb; - return; - } __kfree_skb(skb); }
@@@ -1926,8 -1889,10 +1926,8 @@@ static inline void sk_rx_queue_set(stru if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING)) - return; - - sk->sk_rx_queue_mapping = rx_queue; + if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } @@@ -1935,19 -1900,15 +1935,19 @@@ static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING - sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; + WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING); #endif }
static inline int sk_rx_queue_get(const struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING - if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) - return sk->sk_rx_queue_mapping; + if (sk) { + int res = READ_ONCE(sk->sk_rx_queue_mapping); + + if (res != NO_QUEUE_MAPPING) + return res; + } #endif
return -1; @@@ -2427,11 -2388,13 +2427,11 @@@ static inline void sk_stream_moderate_s return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); + val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); }
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - bool force_schedule); - /** * sk_page_frag - return an appropriate page_frag * @sk: socket @@@ -2645,6 -2608,7 +2645,6 @@@ static inline void skb_setup_tx_timesta &skb_shinfo(skb)->tskey); }
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from @@@ -2656,6 -2620,12 +2656,6 @@@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); - if (static_branch_unlikely(&tcp_rx_skb_cache_key) && - !sk->sk_rx_skb_cache) { - sk->sk_rx_skb_cache = skb; - skb_orphan(skb); - return; - } __kfree_skb(skb); }
@@@ -2850,8 -2820,10 +2850,14 @@@ void sock_set_sndtimeo(struct sock *sk
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_get_timeout(long timeo, void *optval, bool old_timeval); +int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, + sockptr_t optval, int optlen, bool old_timeval); + + static inline bool sk_is_readable(struct sock *sk) + { + if (sk->sk_prot->sock_is_readable) + return sk->sk_prot->sock_is_readable(sk); + return false; + } #endif /* _SOCK_H */ diff --combined include/net/tls.h index adab19a8aed7,1fffb206f09f..526cb2c3b724 --- a/include/net/tls.h +++ b/include/net/tls.h @@@ -66,7 -66,7 +66,7 @@@ #define MAX_IV_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8
-/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes. +/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. * * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3] * @@@ -74,7 -74,6 +74,7 @@@ * Hence b0 contains (3 - 1) = 2. */ #define TLS_AES_CCM_IV_B0_BYTE 2 +#define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \ __SNMP_INC_STATS((net)->mib.tls_statistics, field) @@@ -221,8 -220,6 +221,8 @@@ union tls_crypto_context struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; }; };
@@@ -361,6 -358,7 +361,7 @@@ int tls_sk_query(struct sock *sk, int o int __user *optlen); int tls_sk_attach(struct sock *sk, int optname, char __user *optval, unsigned int optlen); + void tls_err_abort(struct sock *sk, int err);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); @@@ -378,7 -376,7 +379,7 @@@ void tls_sw_release_resources_rx(struc void tls_sw_free_ctx_rx(struct tls_context *tls_ctx); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); - bool tls_sw_stream_read(const struct sock *sk); + bool tls_sw_sock_is_readable(struct sock *sk); ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); @@@ -469,12 -467,6 +470,6 @@@ static inline bool tls_is_sk_tx_device_ #endif }
- static inline void tls_err_abort(struct sock *sk, int err) - { - sk->sk_err = err; - sk_error_report(sk); - } - static inline bool tls_bigint_increment(unsigned char *seq, int len) { int i; @@@ -515,7 -507,7 +510,7 @@@ static inline void tls_advance_record_s struct cipher_context *ctx) { if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size)) - tls_err_abort(sk, EBADMSG); + tls_err_abort(sk, -EBADMSG);
if (prot->version != TLS_1_3_VERSION && prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) diff --combined kernel/bpf/arraymap.c index 5e1ccfae916b,447def540544..c7a5be3bf8be --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@@ -645,7 -645,7 +645,7 @@@ static const struct bpf_iter_seq_info i .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), };
-static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, +static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { u32 i, key, num_elems = 0; @@@ -668,8 -668,9 +668,8 @@@ val = array->value + array->elem_size * i; num_elems++; key = i; - ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, - (u64)(long)&key, (u64)(long)val, - (u64)(long)callback_ctx, 0); + ret = callback_fn((u64)(long)map, (u64)(long)&key, + (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) break; @@@ -1071,6 -1072,7 +1071,7 @@@ static struct bpf_map *prog_array_map_a INIT_WORK(&aux->work, prog_array_map_clear_deferred); INIT_LIST_HEAD(&aux->poke_progs); mutex_init(&aux->poke_mutex); + spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr); if (IS_ERR(map)) { diff --combined kernel/bpf/core.c index ea8a468dbded,6e3ae90ad107..ded9163185d1 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@@ -524,6 -524,7 +524,7 @@@ int bpf_jit_enable __read_mostly = IS int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); int bpf_jit_harden __read_mostly; long bpf_jit_limit __read_mostly; + long bpf_jit_limit_max __read_mostly;
static void bpf_prog_ksym_set_addr(struct bpf_prog *prog) @@@ -817,7 -818,8 +818,8 @@@ u64 __weak bpf_jit_alloc_exec_limit(voi static int __init bpf_jit_charge_init(void) { /* Only used as heuristic here to derive limit. */ - bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2, + bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); + bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, PAGE_SIZE), LONG_MAX); return 0; } @@@ -1821,20 -1823,26 +1823,26 @@@ static unsigned int __bpf_prog_ret0_war bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp) { + bool ret; + if (fp->kprobe_override) return false;
- if (!array->aux->type) { + spin_lock(&array->aux->owner.lock); + + if (!array->aux->owner.type) { /* There's no owner yet where we could check for * compatibility. */ - array->aux->type = fp->type; - array->aux->jited = fp->jited; - return true; + array->aux->owner.type = fp->type; + array->aux->owner.jited = fp->jited; + ret = true; + } else { + ret = array->aux->owner.type == fp->type && + array->aux->owner.jited == fp->jited; } - - return array->aux->type == fp->type && - array->aux->jited == fp->jited; + spin_unlock(&array->aux->owner.lock); + return ret; }
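Moving the owner fields into a locked sub-struct closes a race: two programs of different types could previously both pass the unowned check and both "claim" the array. Reduced to its essentials, the scheme is claim-or-compare under a lock; struct owner below is illustrative, not a kernel type:

struct owner {
	spinlock_t lock;
	enum bpf_prog_type type;	/* 0 == not yet owned */
	bool jited;
};

static bool claim_or_match(struct owner *o, enum bpf_prog_type type,
			   bool jited)
{
	bool ret;

	spin_lock(&o->lock);
	if (!o->type) {			/* first user claims ownership */
		o->type = type;
		o->jited = jited;
		ret = true;
	} else {			/* later users must match it   */
		ret = o->type == type && o->jited == jited;
	}
	spin_unlock(&o->lock);
	return ret;
}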
static int bpf_check_tail_call(const struct bpf_prog *fp) @@@ -2357,11 -2365,6 +2365,11 @@@ const struct bpf_func_proto * __weak bp return NULL; }
+const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) +{ + return NULL; +} + u64 __weak bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) diff --combined net/batman-adv/bridge_loop_avoidance.c index 7242b32fff80,17687848daec..2ed9496fc41f --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -254,7 -254,7 +254,7 @@@ batadv_claim_hash_find(struct batadv_pr * Return: backbone gateway if found or NULL otherwise */ static struct batadv_bla_backbone_gw * -batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr, +batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; @@@ -336,7 -336,7 +336,7 @@@ batadv_bla_del_backbone_claims(struct b * @vid: the VLAN ID * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) */ -static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, +static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, unsigned short vid, int claimtype) { struct sk_buff *skb; @@@ -488,7 -488,7 +488,7 @@@ static void batadv_bla_loopdetect_repor * Return: the (possibly created) backbone gateway or NULL on error */ static struct batadv_bla_backbone_gw * -batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig, +batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig, unsigned short vid, bool own_backbone) { struct batadv_bla_backbone_gw *entry; @@@ -926,7 -926,7 +926,7 @@@ static bool batadv_handle_request(struc */ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, + const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@@ -964,7 -964,7 +964,7 @@@ */ static bool batadv_handle_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, + const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@@ -1560,10 -1560,14 +1560,14 @@@ int batadv_bla_init(struct batadv_priv return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128); - bat_priv->bla.backbone_hash = batadv_hash_new(32); + if (!bat_priv->bla.claim_hash) + return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) + bat_priv->bla.backbone_hash = batadv_hash_new(32); + if (!bat_priv->bla.backbone_hash) { + batadv_hash_destroy(bat_priv->bla.claim_hash); return -ENOMEM; + }
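The batadv_bla_init() change is a leak fix: the old combined check returned -ENOMEM without freeing claim_hash when only the backbone_hash allocation failed. The same fix in the kernel's usual goto-unwind shape, with hypothetical example_* names:

static int example_init(struct example_ctx *c)
{
	/* example_ctx and example_hash_* are stand-ins. */
	c->claim = example_hash_new(128);
	if (!c->claim)
		return -ENOMEM;

	c->backbone = example_hash_new(32);
	if (!c->backbone)
		goto err_free_claim;

	return 0;

err_free_claim:
	example_hash_destroy(c->claim);
	c->claim = NULL;
	return -ENOMEM;
}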
batadv_hash_set_lock_class(bat_priv->bla.claim_hash, &batadv_claim_hash_lock_class_key); @@@ -2126,7 -2130,7 +2130,7 @@@ batadv_bla_claim_dump_entry(struct sk_b struct batadv_hard_iface *primary_if, struct batadv_bla_claim *claim) { - u8 *primary_addr = primary_if->net_dev->dev_addr; + const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; void *hdr; @@@ -2294,7 -2298,7 +2298,7 @@@ batadv_bla_backbone_dump_entry(struct s struct batadv_hard_iface *primary_if, struct batadv_bla_backbone_gw *backbone_gw) { - u8 *primary_addr = primary_if->net_dev->dev_addr; + const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; int msecs; diff --combined net/core/dev.c index e8754560e641,eb3a366bf212..edeb811c454e --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -140,7 -140,7 +140,7 @@@ #include <linux/if_macvlan.h> #include <linux/errqueue.h> #include <linux/hrtimer.h> -#include <linux/netfilter_ingress.h> +#include <linux/netfilter_netdev.h> #include <linux/crash_dump.h> #include <linux/sctp.h> #include <net/udp_tunnel.h> @@@ -303,12 -303,6 +303,12 @@@ static struct netdev_name_node *netdev_ return NULL; }
+bool netdev_name_in_use(struct net *net, const char *name) +{ + return netdev_name_node_lookup(net, name); +} +EXPORT_SYMBOL(netdev_name_in_use); + int netdev_name_node_alt_create(struct net_device *dev, const char *name) { struct netdev_name_node *name_node; @@@ -1139,7 -1133,7 +1139,7 @@@ static int __dev_alloc_name(struct net }
snprintf(buf, IFNAMSIZ, name, i); - if (!__dev_get_by_name(net, buf)) + if (!netdev_name_in_use(net, buf)) return i;
/* It is possible to run out of possible slots @@@ -1193,7 -1187,7 +1193,7 @@@ static int dev_get_valid_name(struct ne
if (strchr(name, '%')) return dev_alloc_name_ns(net, dev, name); - else if (__dev_get_by_name(net, name)) + else if (netdev_name_in_use(net, name)) return -EEXIST; else if (dev->name != name) strlcpy(dev->name, name, IFNAMSIZ); @@@ -1296,8 -1290,8 +1296,8 @@@ rollback old_assign_type = NET_NAME_RENAMED; goto rollback; } else { - pr_err("%s: name change rollback failed: %d\n", - dev->name, ret); + netdev_err(dev, "name change rollback failed: %d\n", + ret); } }
@@@ -2351,7 -2345,7 +2351,7 @@@ static void netif_setup_tc(struct net_d
/* If TC0 is invalidated disable TC mapping */ if (tc->offset + tc->count > txq) { - pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); + netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); dev->num_tc = 0; return; } @@@ -2362,8 -2356,8 +2362,8 @@@
tc = &dev->tc_to_txq[q]; if (tc->offset + tc->count > txq) { - pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", - i, q); + netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", + i, q); netdev_set_prio_tc_map(dev, i, 0); } } @@@ -2927,8 -2921,6 +2927,8 @@@ int netif_set_real_num_tx_queues(struc if (dev->num_tc) netif_setup_tc(dev, txq);
+ dev_qdisc_change_real_num_tx(dev, txq); + dev->real_num_tx_queues = txq;
if (disabling) { @@@ -3171,6 -3163,12 +3171,12 @@@ static u16 skb_tx_hash(const struct net
qoffset = sb_dev->tc_to_txq[tc].offset; qcount = sb_dev->tc_to_txq[tc].count; + if (unlikely(!qcount)) { + net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", + sb_dev->name, qoffset, tc); + qoffset = 0; + qcount = dev->real_num_tx_queues; + } }
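The qcount guard matters because of unchanged code just below this hunk: when the skb carries a recorded RX queue, skb_tx_hash() folds the hash into range by repeated subtraction, which never terminates if a stale sb_dev tc mapping yields qcount == 0. Paraphrased from the surrounding function, not part of this hunk:

if (skb_rx_queue_recorded(skb)) {
	hash = skb_get_rx_queue(skb);
	if (hash >= qoffset)
		hash -= qoffset;
	while (unlikely(hash >= qcount))	/* endless if qcount == 0 */
		hash -= qcount;
	return hash + qoffset;
}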
if (skb_rx_queue_recorded(skb)) { @@@ -3416,7 -3414,7 +3422,7 @@@ EXPORT_SYMBOL(__skb_gso_segment) #ifdef CONFIG_BUG static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) { - pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); + netdev_err(dev, "hw csum failure\n"); skb_dump(KERN_ERR, skb, true); dump_stack(); } @@@ -3914,7 -3912,8 +3920,8 @@@ int dev_loopback_xmit(struct net *net, skb_reset_mac_header(skb); __skb_pull(skb, skb_network_offset(skb)); skb->pkt_type = PACKET_LOOPBACK; - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (skb->ip_summed == CHECKSUM_NONE) + skb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(skb)); skb_dst_force(skb); netif_rx_ni(skb); @@@ -3926,7 -3925,6 +3933,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit) static struct sk_buff * sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) { +#ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); struct tcf_result cl_res;
@@@ -3962,7 -3960,6 +3969,7 @@@ default: break; } +#endif /* CONFIG_NET_CLS_ACT */
return skb; } @@@ -4156,20 -4153,13 +4163,20 @@@ static int __dev_queue_xmit(struct sk_b qdisc_pkt_len_init(skb); #ifdef CONFIG_NET_CLS_ACT skb->tc_at_ingress = 0; -# ifdef CONFIG_NET_EGRESS +#endif +#ifdef CONFIG_NET_EGRESS if (static_branch_unlikely(&egress_needed_key)) { + if (nf_hook_egress_active()) { + skb = nf_hook_egress(skb, &rc, dev); + if (!skb) + goto out; + } + nf_skip_egress(skb, true); skb = sch_handle_egress(skb, &rc, dev); if (!skb) goto out; + nf_skip_egress(skb, false); } -# endif #endif /* If device/qdisc don't need skb->dst, release it right now while * its hot in this cpu cache. @@@ -5311,7 -5301,6 +5318,7 @@@ skip_taps if (static_branch_unlikely(&ingress_needed_key)) { bool another = false;
+ nf_skip_egress(skb, true); skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, &another); if (another) @@@ -5319,7 -5308,6 +5326,7 @@@ if (!skb) goto out;
+ nf_skip_egress(skb, false); if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) goto out; } @@@ -5856,7 -5844,7 +5863,7 @@@ static void gro_normal_one(struct napi_ gro_normal_list(napi); }
-static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) +static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) { struct packet_offload *ptype; __be16 type = skb->protocol; @@@ -5885,11 -5873,12 +5892,11 @@@ if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); - return NET_RX_SUCCESS; + return; }
out: gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); - return NET_RX_SUCCESS; }
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, @@@ -6916,25 -6905,19 +6923,25 @@@ EXPORT_SYMBOL(netif_napi_add)
void napi_disable(struct napi_struct *n) { + unsigned long val, new; + might_sleep(); set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) - msleep(1); + do { + val = READ_ONCE(n->state); + if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { + usleep_range(20, 200); + continue; + } + + new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; + new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); + } while (cmpxchg(&n->state, val, new) != val);
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); clear_bit(NAPI_STATE_DISABLE, &n->state); - clear_bit(NAPI_STATE_THREADED, &n->state); } EXPORT_SYMBOL(napi_disable);
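The rewrite replaces two independent test_and_set_bit() loops with a single read-modify-cmpxchg over the whole state word, so SCHED and NPSVC are acquired together and the THREADED/PREFER_BUSY_POLL clears land in the same atomic update. Stripped of the sleep-while-busy branch, the loop shape is:

/* Generic lock-free state update; the real loop above additionally
 * sleeps (usleep_range) while SCHED or NPSVC is held elsewhere.
 */
unsigned long old, new;

do {
	old = READ_ONCE(n->state);
	new = (old | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC) &
	      ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
} while (cmpxchg(&n->state, old, new) != old);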
@@@ -7012,8 -6995,8 +7019,8 @@@ static int __napi_poll(struct napi_stru }
if (unlikely(work > weight)) - pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n", - n->poll, work, weight); + netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", + n->poll, work, weight);
if (likely(work < weight)) return work; @@@ -8567,7 -8550,8 +8574,7 @@@ static int __dev_set_promiscuity(struc dev->flags &= ~IFF_PROMISC; else { dev->promiscuity -= inc; - pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", - dev->name); + netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); return -EOVERFLOW; } } @@@ -8637,7 -8621,8 +8644,7 @@@ static int __dev_set_allmulti(struct ne dev->flags &= ~IFF_ALLMULTI; else { dev->allmulti -= inc; - pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", - dev->name); + netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n"); return -EOVERFLOW; } } @@@ -9174,11 -9159,14 +9181,11 @@@ int dev_get_port_parent_id(struct net_d }
err = devlink_compat_switch_id_get(dev, ppid); - if (!err || err != -EOPNOTSUPP) + if (!recurse || err != -EOPNOTSUPP) return err;
- if (!recurse) - return -EOPNOTSUPP; - netdev_for_each_lower_dev(dev, lower_dev, iter) { - err = dev_get_port_parent_id(lower_dev, ppid, recurse); + err = dev_get_port_parent_id(lower_dev, ppid, true); if (err) break; if (!first.id_len) @@@ -9922,11 -9910,6 +9929,11 @@@ static netdev_features_t netdev_fix_fea } }
+ if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { + netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); + features &= ~NETIF_F_LRO; + } + if (features & NETIF_F_HW_TLS_TX) { bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); @@@ -10884,7 -10867,7 +10891,7 @@@ struct net_device *alloc_netdev_mqs(in if (!dev->ethtool_ops) dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev); + nf_hook_netdev_init(dev);
return dev;
@@@ -11170,7 -11153,7 +11177,7 @@@ int __dev_change_net_namespace(struct n * we can use it in the destination network namespace. */ err = -EEXIST; - if (__dev_get_by_name(net, dev->name)) { + if (netdev_name_in_use(net, dev->name)) { /* We get here if we can't use the current device name */ if (!pat) goto out; @@@ -11523,7 -11506,7 +11530,7 @@@ static void __net_exit default_device_e
/* Push remaining network devices to init_net */ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); - if (__dev_get_by_name(&init_net, fb_name)) + if (netdev_name_in_use(&init_net, fb_name)) snprintf(fb_name, IFNAMSIZ, "dev%%d"); err = dev_change_net_namespace(dev, &init_net, fb_name); if (err) { diff --combined net/core/net-sysfs.c index d6e4e0b43beb,b2e49eb7001d..9c01c642cf9e --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@@ -175,14 -175,6 +175,14 @@@ static int change_carrier(struct net_de static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { + struct net_device *netdev = to_net_dev(dev); + + /* The check is also done in change_carrier; this helps returning early + * without hitting the trylock/restart in netdev_store. + */ + if (!netdev->netdev_ops->ndo_change_carrier) + return -EOPNOTSUPP; + return netdev_store(dev, attr, buf, len, change_carrier); }
@@@ -204,12 -196,6 +204,12 @@@ static ssize_t speed_show(struct devic struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->ethtool_ops->get_link_ksettings) + return ret; + if (!rtnl_trylock()) return restart_syscall();
@@@ -230,12 -216,6 +230,12 @@@ static ssize_t duplex_show(struct devic struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->ethtool_ops->get_link_ksettings) + return ret; + if (!rtnl_trylock()) return restart_syscall();
@@@ -488,14 -468,6 +488,14 @@@ static ssize_t proto_down_store(struct struct device_attribute *attr, const char *buf, size_t len) { + struct net_device *netdev = to_net_dev(dev); + + /* The check is also done in change_proto_down; this helps returning + * early without hitting the trylock/restart in netdev_store. + */ + if (!netdev->netdev_ops->ndo_change_proto_down) + return -EOPNOTSUPP; + return netdev_store(dev, attr, buf, len, change_proto_down); } NETDEVICE_SHOW_RW(proto_down, fmt_dec); @@@ -506,12 -478,6 +506,12 @@@ static ssize_t phys_port_id_show(struc struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this helps returning + * early without hitting the trylock/restart below. + */ + if (!netdev->netdev_ops->ndo_get_phys_port_id) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall();
@@@ -534,13 -500,6 +534,13 @@@ static ssize_t phys_port_name_show(stru struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->netdev_ops->ndo_get_phys_port_name && + !netdev->netdev_ops->ndo_get_devlink_port) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall();
@@@ -563,14 -522,6 +563,14 @@@ static ssize_t phys_switch_id_show(stru struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_port_parent_id; this helps + * returning early without hitting the trylock/restart below. This works + * because recurse is false when calling dev_get_port_parent_id. + */ + if (!netdev->netdev_ops->ndo_get_port_parent_id && + !netdev->netdev_ops->ndo_get_devlink_port) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall();
@@@ -1275,12 -1226,6 +1275,12 @@@ static ssize_t tx_maxrate_store(struct if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ /* The check is also done later; this helps returning early without + * hitting the trylock/restart below. + */ + if (!dev->netdev_ops->ndo_set_tx_maxrate) + return -EOPNOTSUPP; + err = kstrtou32(buf, 10, &rate); if (err < 0) return err; @@@ -1924,7 -1869,7 +1924,7 @@@ static struct class net_class __ro_afte .get_ownership = net_get_ownership, };
-#ifdef CONFIG_OF_NET +#ifdef CONFIG_OF static int of_dev_node_match(struct device *dev, const void *data) { for (; dev; dev = dev->parent) { @@@ -2028,9 -1973,9 +2028,9 @@@ int netdev_register_kobject(struct net_ int netdev_change_owner(struct net_device *ndev, const struct net *net_old, const struct net *net_new) { + kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID; + kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID; struct device *dev = &ndev->dev; - kuid_t old_uid, new_uid; - kgid_t old_gid, new_gid; int error;
net_ns_get_ownership(net_old, &old_uid, &old_gid); diff --combined net/core/skbuff.c index 74601bbc56ac,fe9358437380..09b8cf8ab234 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -80,6 -80,7 +80,7 @@@ #include <linux/indirect_call_wrapper.h>
#include "datagram.h" + #include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init; static struct kmem_cache *skbuff_fclone_cache __ro_after_init; @@@ -134,31 -135,34 +135,31 @@@ struct napi_alloc_cache static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
-static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask, - unsigned int align_mask) +void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask); -} - -void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) -{ fragsz = SKB_DATA_ALIGN(fragsz);
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); + return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); } EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { - struct page_frag_cache *nc; void *data;
fragsz = SKB_DATA_ALIGN(fragsz); if (in_hardirq() || irqs_disabled()) { - nc = this_cpu_ptr(&netdev_alloc_cache); + struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask); } else { + struct napi_alloc_cache *nc; + local_bh_disable(); - data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); + nc = this_cpu_ptr(&napi_alloc_cache); + data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); local_bh_enable(); } return data; @@@ -394,9 -398,8 +395,9 @@@ struct sk_buff *__alloc_skb(unsigned in { struct kmem_cache *cache; struct sk_buff *skb; - u8 *data; + unsigned int osize; bool pfmemalloc; + u8 *data;
cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; @@@ -428,8 -431,7 +429,8 @@@ * Put skb_shared_info exactly at the end of allocated zone, * to allow max possible filling before reallocation. */ - size = SKB_WITH_OVERHEAD(ksize(data)); + osize = ksize(data); + size = SKB_WITH_OVERHEAD(osize); prefetchw(data + size);
/* @@@ -438,7 -440,7 +439,7 @@@ * the tail pointer in struct sk_buff! */ memset(skb, 0, offsetof(struct sk_buff, tail)); - __build_skb_around(skb, data, 0); + __build_skb_around(skb, data, osize); skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) { @@@ -1803,30 -1805,39 +1804,39 @@@ EXPORT_SYMBOL(skb_realloc_headroom) struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) { int delta = headroom - skb_headroom(skb); + int osize = skb_end_offset(skb); + struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0, "%s is expecting an increase in the headroom", __func__)) return skb;
- /* pskb_expand_head() might crash, if skb is shared */ - if (skb_shared(skb)) { + delta = SKB_DATA_ALIGN(delta); + /* pskb_expand_head() might crash, if skb is shared. */ + if (skb_shared(skb) || !is_skb_wmem(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb)) { - if (skb->sk) - skb_set_owner_w(nskb, skb->sk); - consume_skb(skb); - } else { - kfree_skb(skb); - } + if (unlikely(!nskb)) + goto fail; + + if (sk) + skb_set_owner_w(nskb, sk); + consume_skb(skb); skb = nskb; } - if (skb && - pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { - kfree_skb(skb); - skb = NULL; + if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) + goto fail; + + if (sk && is_skb_wmem(skb)) { + delta = skb_end_offset(skb) - osize; + refcount_add(delta, &sk->sk_wmem_alloc); + skb->truesize += delta; } return skb; + + fail: + kfree_skb(skb); + return NULL; } EXPORT_SYMBOL(skb_expand_head);
diff --combined net/ipv4/tcp.c index 7a7b9aa8f19a,f5c336f8b0c8..a7b1138d619c --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -287,8 -287,8 +287,8 @@@ enum TCP_CMSG_TS = 2 };
-struct percpu_counter tcp_orphan_count; -EXPORT_SYMBOL_GPL(tcp_orphan_count); +DEFINE_PER_CPU(unsigned int, tcp_orphan_count); +EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem); @@@ -325,6 -325,11 +325,6 @@@ struct tcp_splice_state unsigned long tcp_memory_pressure __read_mostly; EXPORT_SYMBOL_GPL(tcp_memory_pressure);
-DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); -EXPORT_SYMBOL(tcp_rx_skb_cache_key); - -DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); - void tcp_enter_memory_pressure(struct sock *sk) { unsigned long val; @@@ -481,10 -486,7 +481,7 @@@ static bool tcp_stream_is_readable(stru { if (tcp_epollin_ready(sk, target)) return true; - - if (sk->sk_prot->stream_memory_read) - return sk->sk_prot->stream_memory_read(sk); - return false; + return sk_is_readable(sk); }
/* @@@ -642,7 -644,7 +639,7 @@@ int tcp_ioctl(struct sock *sk, int cmd } EXPORT_SYMBOL(tcp_ioctl);
-static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) +void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; tp->pushed_seq = tp->write_seq; @@@ -653,13 -655,15 +650,13 @@@ static inline bool forced_push(const st return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); }
-static void skb_entail(struct sock *sk, struct sk_buff *skb) +void tcp_skb_entail(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
- skb->csum = 0; tcb->seq = tcb->end_seq = tp->write_seq; tcb->tcp_flags = TCPHDR_ACK; - tcb->sacked = 0; __skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); sk_wmem_queued_add(sk, skb->truesize); @@@ -854,15 -858,30 +851,15 @@@ ssize_t tcp_splice_read(struct socket * } EXPORT_SYMBOL(tcp_splice_read);
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - bool force_schedule) +struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, + bool force_schedule) { struct sk_buff *skb;
- if (likely(!size)) { - skb = sk->sk_tx_skb_cache; - if (skb) { - skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); - sk->sk_tx_skb_cache = NULL; - pskb_trim(skb, 0); - INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); - skb_shinfo(skb)->tx_flags = 0; - memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb)); - return skb; - } - } - /* The TCP header must be at least 32-bit aligned. */ - size = ALIGN(size, 4); - if (unlikely(tcp_under_memory_pressure(sk))) sk_mem_reclaim_partial(sk);
- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); + skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp); if (likely(skb)) { bool mem_scheduled;
@@@ -873,8 -892,12 +870,8 @@@ mem_scheduled = sk_wmem_schedule(sk, skb->truesize); } if (likely(mem_scheduled)) { - skb_reserve(skb, sk->sk_prot->max_header); - /* - * Make sure that we have exactly size bytes - * available to the caller, no more, no less. - */ - skb->reserved_tailroom = skb->end - skb->tail - size; + skb_reserve(skb, MAX_TCP_HEADER); + skb->ip_summed = CHECKSUM_PARTIAL; INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); return skb; } @@@ -927,11 -950,9 +924,11 @@@ int tcp_send_mss(struct sock *sk, int * * importantly be able to generate EPOLLOUT for Edge Trigger epoll() * users. */ -void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) +void tcp_remove_empty_skb(struct sock *sk) { - if (skb && !skb->len) { + struct sk_buff *skb = tcp_write_queue_tail(sk); + + if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { tcp_unlink_write_queue(skb, sk); if (tcp_write_queue_empty(sk)) tcp_chrono_stop(sk, TCP_CHRONO_BUSY); @@@ -939,8 -960,8 +936,8 @@@ } }
-struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, - struct page *page, int offset, size_t *size) +static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, + struct page *page, int offset, size_t *size) { struct sk_buff *skb = tcp_write_queue_tail(sk); struct tcp_sock *tp = tcp_sk(sk); @@@ -953,15 -974,15 +950,15 @@@ new_segment if (!sk_stream_memory_free(sk)) return NULL;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, - tcp_rtx_and_write_queues_empty(sk)); + skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + tcp_rtx_and_write_queues_empty(sk)); if (!skb) return NULL;
#ifdef CONFIG_TLS_DEVICE skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); #endif - skb_entail(sk, skb); + tcp_skb_entail(sk, skb); copy = size_goal; }
@@@ -992,6 -1013,7 +989,6 @@@ skb->truesize += copy; sk_wmem_queued_add(sk, copy); sk_mem_charge(sk, copy); - skb->ip_summed = CHECKSUM_PARTIAL; WRITE_ONCE(tp->write_seq, tp->write_seq + copy); TCP_SKB_CB(skb)->end_seq += copy; tcp_skb_pcount_set(skb, 0); @@@ -1082,7 -1104,7 +1079,7 @@@ out return copied;
do_error: - tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); + tcp_remove_empty_skb(sk); if (copied) goto out; out_err: @@@ -1281,14 -1303,15 +1278,14 @@@ new_segment goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, - first_skb); + skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + first_skb); if (!skb) goto wait_for_space;
process_backlog++; - skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb); + tcp_skb_entail(sk, skb); copy = size_goal;
/* All packets are restored as if they have @@@ -1303,7 -1326,14 +1300,7 @@@ if (copy > msg_data_left(msg)) copy = msg_data_left(msg);
- /* Where to copy to? */ - if (skb_availroom(skb) > 0 && !zc) { - /* We have some space in skb head. Superb! */ - copy = min_t(int, copy, skb_availroom(skb)); - err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); - if (err) - goto do_fault; - } else if (!zc) { + if (!zc) { bool merge = true; int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); @@@ -1402,7 -1432,9 +1399,7 @@@ out_nopush return copied + copied_syn;
do_error: - skb = tcp_write_queue_tail(sk); -do_fault: - tcp_remove_empty_skb(sk, skb); + tcp_remove_empty_skb(sk);
if (copied + copied_syn) goto out; @@@ -2655,36 -2687,11 +2652,36 @@@ void tcp_shutdown(struct sock *sk, int } EXPORT_SYMBOL(tcp_shutdown);
+int tcp_orphan_count_sum(void) +{ + int i, total = 0; + + for_each_possible_cpu(i) + total += per_cpu(tcp_orphan_count, i); + + return max(total, 0); +} + +static int tcp_orphan_cache; +static struct timer_list tcp_orphan_timer; +#define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) + +static void tcp_orphan_update(struct timer_list *unused) +{ + WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); +} + +static bool tcp_too_many_orphans(int shift) +{ + return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans; +} + bool tcp_check_oom(struct sock *sk, int shift) { bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift); + too_many_orphans = tcp_too_many_orphans(shift); out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans) @@@ -2793,7 -2800,7 +2790,7 @@@ adjudge_to_death /* remove backlog if any, without releasing ownership. */ __release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(tcp_orphan_count);
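The tcp_close() hot path now only bumps a per-cpu int; the deferrable timer added above republishes the folded sum every 100 ms for the slow-path reader in tcp_check_oom(). Because an increment and its matching decrement may land on different CPUs, a snapshot can be transiently negative, hence the max(total, 0) clamp in tcp_orphan_count_sum(). The same approximate-counter idea in isolation, with hypothetical example_* names:

DEFINE_PER_CPU(unsigned int, example_count);
static int example_cache;		/* readers use READ_ONCE() */
static struct timer_list example_timer;

static void example_update(struct timer_list *unused)
{
	int cpu, total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(example_count, cpu);

	/* Clamp transient negatives from cross-CPU inc/dec pairs. */
	WRITE_ONCE(example_cache, max(total, 0));
	mod_timer(&example_timer, jiffies + msecs_to_jiffies(100));
}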
/* Have we already been destroyed by a softirq or backlog? */ if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) @@@ -2910,6 -2917,11 +2907,6 @@@ void tcp_write_queue_purge(struct sock sk_wmem_free_skb(sk, skb); } tcp_rtx_queue_purge(sk); - skb = sk->sk_tx_skb_cache; - if (skb) { - __kfree_skb(skb); - sk->sk_tx_skb_cache = NULL; - } INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); sk_mem_reclaim(sk); tcp_clear_all_retrans_hints(tcp_sk(sk)); @@@ -2946,6 -2958,10 +2943,6 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); - if (sk->sk_rx_skb_cache) { - __kfree_skb(sk->sk_rx_skb_cache); - sk->sk_rx_skb_cache = NULL; - } WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); tp->urg_data = 0; tcp_write_queue_purge(sk); @@@ -4486,10 -4502,7 +4483,10 @@@ void __init tcp_init(void sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); - percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); + + timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); + inet_hashinfo_init(&tcp_hashinfo); inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", thash_entries, 21, /* one slot per 2 MB*/ diff --combined net/mac80211/mesh.c index a4212a333d61,5dcfd53a4ab6..15ac08d111ea --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@@ -672,7 -672,7 +672,7 @@@ ieee80211_mesh_update_bss_params(struc u8 *ie, u8 ie_len) { struct ieee80211_supported_band *sband; - const u8 *cap; + const struct element *cap; const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata); @@@ -687,9 -687,10 +687,10 @@@
sdata->vif.bss_conf.he_support = true;
- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); - if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3])) - he_oper = (void *)(cap + 3); + cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); + if (cap && cap->datalen >= 1 + sizeof(*he_oper) && + cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1)) + he_oper = (void *)(cap->data + 1);
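cfg80211_find_ext_elem() returns a struct element instead of a raw u8 pointer, so the length checks above test the element's own datalen rather than magic offsets like cap[1] and cap[3]; the extra "1 +" accounts for the extension ID byte that precedes the HE Operation payload. The new access pattern on its own:

const struct element *elem;

elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ies, ies_len);
if (elem && elem->datalen >= 1 + sizeof(*he_oper) &&
    elem->datalen >= 1 + ieee80211_he_oper_size(elem->data + 1))
	he_oper = (void *)(elem->data + 1);	/* skip the extension ID */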
if (he_oper) sdata->vif.bss_conf.he_oper.params = @@@ -1246,7 -1247,7 +1247,7 @@@ ieee80211_mesh_rx_probe_req(struct ieee struct sk_buff *presp; struct beacon_data *bcn; struct ieee80211_mgmt *hdr; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; size_t baselen; u8 *pos;
@@@ -1255,24 -1256,22 +1256,24 @@@ if (baselen > len) return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid, - NULL); - - if (!elems.mesh_id) + elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid, + NULL); + if (!elems) return;
+ if (!elems->mesh_id) + goto free; + /* 802.11-2012 10.1.4.3.2 */ if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && !is_broadcast_ether_addr(mgmt->da)) || - elems.ssid_len != 0) - return; + elems->ssid_len != 0) + goto free;
- if (elems.mesh_id_len != 0 && - (elems.mesh_id_len != ifmsh->mesh_id_len || - memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) - return; + if (elems->mesh_id_len != 0 && + (elems->mesh_id_len != ifmsh->mesh_id_len || + memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) + goto free;
rcu_read_lock(); bcn = rcu_dereference(ifmsh->beacon); @@@ -1296,8 -1295,6 +1297,8 @@@ ieee80211_tx_skb(sdata, presp); out: rcu_read_unlock(); +free: + kfree(elems); }
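ieee802_11_parse_elems() now returns a kmalloc'ed result rather than filling a large caller-provided struct, keeping the parse state off the stack; in exchange, every early return in these rx handlers becomes a goto to a single kfree(). The recurring shape across this hunk and the ones below:

struct ieee802_11_elems *elems;

elems = ieee802_11_parse_elems(pos, len, false, bssid, NULL);
if (!elems)
	return;			/* allocation failed, nothing to free */

if (!elems->mesh_id)
	goto free;		/* all later exits share one kfree()  */

/* ... use elems ... */

free:
	kfree(elems);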
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, @@@ -1308,7 -1305,7 +1309,7 @@@ { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; struct ieee80211_channel *channel; size_t baselen; int freq; @@@ -1323,47 -1320,42 +1324,47 @@@ if (baselen > len) return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - false, &elems, mgmt->bssid, NULL); + elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, + len - baselen, + false, mgmt->bssid, NULL); + if (!elems) + return;
/* ignore non-mesh or secure / unsecure mismatch */ - if ((!elems.mesh_id || !elems.mesh_config) || - (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || - (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) - return; + if ((!elems->mesh_id || !elems->mesh_config) || + (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || + (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) + goto free;
- if (elems.ds_params) - freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); + if (elems->ds_params) + freq = ieee80211_channel_to_frequency(elems->ds_params[0], band); else freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) - return; + goto free;
- if (mesh_matches_local(sdata, &elems)) { + if (mesh_matches_local(sdata, elems)) { mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n", sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal); if (!sdata->u.mesh.user_mpm || sdata->u.mesh.mshcfg.rssi_threshold == 0 || sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) - mesh_neighbour_update(sdata, mgmt->sa, &elems, + mesh_neighbour_update(sdata, mgmt->sa, elems, rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && !sdata->vif.csa_active) - ieee80211_mesh_process_chnswitch(sdata, &elems, true); + ieee80211_mesh_process_chnswitch(sdata, elems, true); }
if (ifmsh->sync_ops) - ifmsh->sync_ops->rx_bcn_presp(sdata, - stype, mgmt, &elems, rx_status); + ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len, + elems->mesh_config, rx_status); +free: + kfree(elems); }
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) @@@ -1455,7 -1447,7 +1456,7 @@@ static void mesh_rx_csa_frame(struct ie struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; u16 pre_value; bool fwd_csa = true; size_t baselen; @@@ -1468,37 -1460,33 +1469,37 @@@ pos = mgmt->u.action.u.chan_switch.variable; baselen = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); - ieee802_11_parse_elems(pos, len - baselen, true, &elems, - mgmt->bssid, NULL); - - if (!mesh_matches_local(sdata, &elems)) + elems = ieee802_11_parse_elems(pos, len - baselen, true, + mgmt->bssid, NULL); + if (!elems) return;
- ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; + if (!mesh_matches_local(sdata, elems)) + goto free; + + ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl; if (!--ifmsh->chsw_ttl) fwd_csa = false;
- pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); + pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value); if (ifmsh->pre_value >= pre_value) - return; + goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.csa_active && - !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) { + !ieee80211_mesh_process_chnswitch(sdata, elems, false)) { mcsa_dbg(sdata, "Failed to process CSA action frame"); - return; + goto free; }
/* forward or re-broadcast the CSA frame */ if (fwd_csa) { - if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) + if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0) mcsa_dbg(sdata, "Failed to forward the CSA frame"); } +free: + kfree(elems); }
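The three mesh RX paths above (beacon/probe-response handling and this CSA action frame handler) now share one shape: ieee802_11_parse_elems() returns a heap allocation and every early exit funnels through a single free label. A minimal userspace sketch of that error-handling shape, with hypothetical names rather than the mac80211 API:

/*
 * Illustrative only: parse into a heap object, bail out through one
 * cleanup label.  Compiles with any C99 compiler.
 */
#include <stdlib.h>
#include <string.h>

struct parsed_elems {
	size_t mesh_id_len;
	unsigned char mesh_id[32];
};

static struct parsed_elems *parse_elems(const unsigned char *buf, size_t len)
{
	struct parsed_elems *elems = calloc(1, sizeof(*elems));

	if (!elems)
		return NULL;
	if (len > sizeof(elems->mesh_id))
		len = sizeof(elems->mesh_id);
	memcpy(elems->mesh_id, buf, len);
	elems->mesh_id_len = len;
	return elems;
}

static void rx_frame(const unsigned char *buf, size_t len,
		     const unsigned char *my_id, size_t my_id_len)
{
	struct parsed_elems *elems = parse_elems(buf, len);

	if (!elems)
		return;			/* nothing allocated, nothing to free */
	if (elems->mesh_id_len != my_id_len ||
	    memcmp(elems->mesh_id, my_id, my_id_len))
		goto free;		/* mismatch: exit through cleanup */
	/* ... process the frame ... */
free:
	free(elems);
}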
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, diff --combined net/mptcp/options.c index 422f4acfb3e6,f0f22eb4fd5f..7c3420afb1a0 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@@ -485,11 -485,11 +485,11 @@@ static bool mptcp_established_options_m mpext = mptcp_get_ext(skb); data_len = mpext ? mpext->data_len : 0;
- /* we will check ext_copy.data_len in mptcp_write_options() to + /* we will check ops->data_len in mptcp_write_options() to * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and * TCPOLEN_MPTCP_MPC_ACK */ - opts->ext_copy.data_len = data_len; + opts->data_len = data_len; opts->suboptions = OPTION_MPTCP_MPC_ACK; opts->sndr_key = subflow->local_key; opts->rcvr_key = subflow->remote_key; @@@ -505,9 -505,9 +505,9 @@@ len = TCPOLEN_MPTCP_MPC_ACK_DATA; if (opts->csum_reqd) { /* we need to propagate more info to csum the pseudo hdr */ - opts->ext_copy.data_seq = mpext->data_seq; - opts->ext_copy.subflow_seq = mpext->subflow_seq; - opts->ext_copy.csum = mpext->csum; + opts->data_seq = mpext->data_seq; + opts->subflow_seq = mpext->subflow_seq; + opts->csum = mpext->csum; len += TCPOLEN_MPTCP_DSS_CHECKSUM; } *size = ALIGN(len, 4); @@@ -748,7 -748,9 +748,7 @@@ static bool mptcp_established_options_m /* can't send MP_PRIO with MPC, as they share the same option space: * 'backup'. Also it makes no sense at all */ - if (!subflow->send_mp_prio || - ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | - OPTION_MPTCP_MPC_ACK) & opts->suboptions)) + if (!subflow->send_mp_prio || (opts->suboptions & OPTIONS_MPTCP_MPC)) return false;
/* account for the trailing 'nop' option */ @@@ -1017,9 -1019,11 +1017,9 @@@ static void ack_update_msk(struct mptcp old_snd_una = msk->snd_una; new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
- /* ACK for data not even sent yet and even above recovery bound? Ignore.*/
- if (unlikely(after64(new_snd_una, snd_nxt))) {
- if (!msk->recovery || after64(new_snd_una, msk->recovery_snd_nxt))
- new_snd_una = old_snd_una;
- }
+ /* ACK for data not even sent yet? Ignore. */
+ if (unlikely(after64(new_snd_una, snd_nxt)))
+ new_snd_una = old_snd_una;
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
@@@ -1223,7 -1227,7 +1223,7 @@@ static void mptcp_set_rwin(const struc WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); }
- static u16 mptcp_make_csum(const struct mptcp_ext *mpext) + static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum16 sum) { struct csum_pseudo_header header; __wsum csum; @@@ -1233,15 -1237,21 +1233,21 @@@ * always the 64-bit value, irrespective of what length is used in the * DSS option itself. */ - header.data_seq = cpu_to_be64(mpext->data_seq); - header.subflow_seq = htonl(mpext->subflow_seq); - header.data_len = htons(mpext->data_len); + header.data_seq = cpu_to_be64(data_seq); + header.subflow_seq = htonl(subflow_seq); + header.data_len = htons(data_len); header.csum = 0;
- csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum)); + csum = csum_partial(&header, sizeof(header), ~csum_unfold(sum)); return (__force u16)csum_fold(csum); }
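__mptcp_make_csum() above only re-parameterizes the existing computation so mptcp_write_options() can call it without a struct mptcp_ext. The underlying arithmetic is a plain 16-bit one's-complement sum over the DSS pseudo header. A self-contained approximation of that arithmetic (userspace stand-ins; the kernel's csum_partial()/csum_unfold() helpers differ in representation details):

#include <stdint.h>
#include <stddef.h>
#include <endian.h>	/* htobe64 (glibc) */
#include <arpa/inet.h>	/* htonl, htons */

struct pseudo_hdr {		/* mirrors struct csum_pseudo_header */
	uint64_t data_seq;	/* big endian */
	uint32_t subflow_seq;	/* big endian */
	uint16_t data_len;	/* big endian */
	uint16_t csum;		/* zero while summing */
} __attribute__((packed));

/* sum a buffer as big-endian 16-bit words (even length assumed here) */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	return sum;
}

/* fold carries and complement, like csum_fold() */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t make_csum(uint64_t data_seq, uint32_t subflow_seq,
			  uint16_t data_len, uint16_t data_csum)
{
	struct pseudo_hdr h = {
		.data_seq = htobe64(data_seq),
		.subflow_seq = htonl(subflow_seq),
		.data_len = htons(data_len),
		.csum = 0,
	};
	/* seed with the payload checksum, roughly as the kernel does
	 * with ~csum_unfold(csum), then add the pseudo header */
	uint32_t sum = sum16((const uint8_t *)&h, sizeof(h),
			     (uint16_t)~data_csum);

	return fold(sum);
}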
+ static u16 mptcp_make_csum(const struct mptcp_ext *mpext) + { + return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len, + mpext->csum); + } + void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts) { @@@ -1325,14 -1335,15 +1331,14 @@@ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); } } - } else if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | - OPTION_MPTCP_MPC_ACK) & opts->suboptions) { + } else if (OPTIONS_MPTCP_MPC & opts->suboptions) { u8 len, flag = MPTCP_CAP_HMAC_SHA256;
if (OPTION_MPTCP_MPC_SYN & opts->suboptions) { len = TCPOLEN_MPTCP_MPC_SYN; } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) { len = TCPOLEN_MPTCP_MPC_SYNACK; - } else if (opts->ext_copy.data_len) { + } else if (opts->data_len) { len = TCPOLEN_MPTCP_MPC_ACK_DATA; if (opts->csum_reqd) len += TCPOLEN_MPTCP_DSS_CHECKSUM; @@@ -1361,14 -1372,17 +1367,17 @@@
put_unaligned_be64(opts->rcvr_key, ptr); ptr += 2; - if (!opts->ext_copy.data_len) + if (!opts->data_len) goto mp_capable_done;
if (opts->csum_reqd) { - put_unaligned_be32(opts->ext_copy.data_len << 16 | - mptcp_make_csum(&opts->ext_copy), ptr); + put_unaligned_be32(opts->data_len << 16 | + __mptcp_make_csum(opts->data_seq, + opts->subflow_seq, + opts->data_len, + opts->csum), ptr); } else { - put_unaligned_be32(opts->ext_copy.data_len << 16 | + put_unaligned_be32(opts->data_len << 16 | TCPOPT_NOP << 8 | TCPOPT_NOP, ptr); } ptr += 1; diff --combined net/smc/af_smc.c index 5e50e007a7da,78b663dbfa1f..8dc34388b2c1 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@@ -439,47 -439,6 +439,47 @@@ static int smcr_clnt_conf_first_link(st return 0; }
+static bool smc_isascii(char *hostname) +{ + int i; + + for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++) + if (!isascii(hostname[i])) + return false; + return true; +} + +static void smc_conn_save_peer_info_fce(struct smc_sock *smc, + struct smc_clc_msg_accept_confirm *clc) +{ + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)clc; + struct smc_clc_first_contact_ext *fce; + int clc_v2_len; + + if (clc->hdr.version == SMC_V1 || + !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) + return; + + if (smc->conn.lgr->is_smcd) { + memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid, + SMC_MAX_EID_LEN); + clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2, + d1); + } else { + memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid, + SMC_MAX_EID_LEN); + clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2, + r1); + } + fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len); + smc->conn.lgr->peer_os = fce->os_type; + smc->conn.lgr->peer_smc_release = fce->release; + if (smc_isascii(fce->hostname)) + memcpy(smc->conn.lgr->peer_hostname, fce->hostname, + SMC_MAX_HOSTNAME_LEN); +} + static void smcr_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { @@@ -492,6 -451,16 +492,6 @@@ smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); }
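smc_conn_save_peer_info_fce() above locates the trailing first-contact extension by taking offsetofend() of whichever variant member (d1 for SMC-D, r1 for SMC-R) the accept message actually carries, instead of the old fixed sizeof(*clc_v2). A schematic of that technique with made-up layouts (not the real CLC wire format):

#include <stddef.h>
#include <stdint.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct accept_v2 {			/* illustrative, not the CLC struct */
	uint8_t hdr[8];
	union {
		struct { uint8_t eid[32]; } d1;			 /* SMC-D */
		struct { uint8_t eid[32]; uint8_t gid[16]; } r1; /* SMC-R */
	};
};

struct first_contact_ext {
	uint8_t os_type;
	uint8_t release;
	char hostname[32];
};

/* the extension starts right after the variant that was sent */
static struct first_contact_ext *find_fce(struct accept_v2 *clc, int is_smcd)
{
	size_t off = is_smcd ? offsetofend(struct accept_v2, d1)
			     : offsetofend(struct accept_v2, r1);

	return (struct first_contact_ext *)((uint8_t *)clc + off);
}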
-static bool smc_isascii(char *hostname) -{ - int i; - - for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++) - if (!isascii(hostname[i])) - return false; - return true; -} - static void smcd_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { @@@ -503,6 -472,22 +503,6 @@@ smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; - if (clc->hdr.version > SMC_V1 && - (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) { - struct smc_clc_msg_accept_confirm_v2 *clc_v2 = - (struct smc_clc_msg_accept_confirm_v2 *)clc; - struct smc_clc_first_contact_ext *fce = - (struct smc_clc_first_contact_ext *) - (((u8 *)clc_v2) + sizeof(*clc_v2)); - - memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid, - SMC_MAX_EID_LEN); - smc->conn.lgr->peer_os = fce->os_type; - smc->conn.lgr->peer_smc_release = fce->release; - if (smc_isascii(fce->hostname)) - memcpy(smc->conn.lgr->peer_hostname, fce->hostname, - SMC_MAX_HOSTNAME_LEN); - } }
static void smc_conn_save_peer_info(struct smc_sock *smc, @@@ -512,16 -497,14 +512,16 @@@ smcd_conn_save_peer_info(smc, clc); else smcr_conn_save_peer_info(smc, clc); + smc_conn_save_peer_info_fce(smc, clc); }
static void smc_link_save_peer_info(struct smc_link *link, - struct smc_clc_msg_accept_confirm *clc) + struct smc_clc_msg_accept_confirm *clc, + struct smc_init_info *ini) { link->peer_qpn = ntoh24(clc->r0.qpn); - memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE); - memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac)); + memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE); + memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac)); link->peer_psn = ntoh24(clc->r0.psn); link->peer_mtu = clc->r0.qp_mtu; } @@@ -625,9 -608,7 +625,9 @@@ static int smc_find_rdma_device(struct * used for the internal TCP socket */ smc_pnet_find_roce_resource(smc->clcsock->sk, ini); - if (!ini->ib_dev) + if (!ini->check_smcrv2 && !ini->ib_dev) + return SMC_CLC_DECL_NOSMCRDEV; + if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2) return SMC_CLC_DECL_NOSMCRDEV; return 0; } @@@ -711,42 -692,27 +711,42 @@@ static int smc_find_proposal_devices(st int rc = 0;
/* check if there is an ism device available */ - if (ini->smcd_version & SMC_V1) { - if (smc_find_ism_device(smc, ini) || - smc_connect_ism_vlan_setup(smc, ini)) { - if (ini->smc_type_v1 == SMC_TYPE_B) - ini->smc_type_v1 = SMC_TYPE_R; - else - ini->smc_type_v1 = SMC_TYPE_N; - } /* else ISM V1 is supported for this connection */ - if (smc_find_rdma_device(smc, ini)) { - if (ini->smc_type_v1 == SMC_TYPE_B) - ini->smc_type_v1 = SMC_TYPE_D; - else - ini->smc_type_v1 = SMC_TYPE_N; - } /* else RDMA is supported for this connection */ - } - if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini)) - ini->smc_type_v2 = SMC_TYPE_N; + if (!(ini->smcd_version & SMC_V1) || + smc_find_ism_device(smc, ini) || + smc_connect_ism_vlan_setup(smc, ini)) + ini->smcd_version &= ~SMC_V1; + /* else ISM V1 is supported for this connection */ + + /* check if there is an rdma device available */ + if (!(ini->smcr_version & SMC_V1) || + smc_find_rdma_device(smc, ini)) + ini->smcr_version &= ~SMC_V1; + /* else RDMA is supported for this connection */ + + ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1, + ini->smcr_version & SMC_V1); + + /* check if there is an ism v2 device available */ + if (!(ini->smcd_version & SMC_V2) || + !smc_ism_is_v2_capable() || + smc_find_ism_v2_device_clnt(smc, ini)) + ini->smcd_version &= ~SMC_V2; + + /* check if there is an rdma v2 device available */ + ini->check_smcrv2 = true; + ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr; + if (!(ini->smcr_version & SMC_V2) || + smc->clcsock->sk->sk_family != AF_INET || + !smc_clc_ueid_count() || + smc_find_rdma_device(smc, ini)) + ini->smcr_version &= ~SMC_V2; + ini->check_smcrv2 = false; + + ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2, + ini->smcr_version & SMC_V2);
/* if neither ISM nor RDMA are supported, fallback */ - if (!smcr_indicated(ini->smc_type_v1) && - ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N) + if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N) rc = SMC_CLC_DECL_NOSMCDEV;
return rc; @@@ -786,64 -752,6 +786,64 @@@ static int smc_connect_clc(struct smc_s SMC_CLC_ACCEPT, CLC_WAIT_TIME); }
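The reworked smc_find_proposal_devices() above replaces the old SMC_TYPE_B/R/D/N state juggling with two version bitmasks that start fully populated and lose bits as individual probes fail; only when both masks end up empty does the connection fall back. The control flow, reduced to stubs (constants and probes illustrative):

#include <stdio.h>

#define V1 0x1
#define V2 0x2

/* stand-ins for the ISM/RDMA device probes; nonzero means "available" */
static int have_ism_v1(void)  { return 0; }
static int have_rdma_v1(void) { return 1; }
static int have_ism_v2(void)  { return 0; }
static int have_rdma_v2(void) { return 1; }

int main(void)
{
	int smcd = V1 | V2, smcr = V1 | V2;

	if (!have_ism_v1())
		smcd &= ~V1;
	if (!have_rdma_v1())
		smcr &= ~V1;
	if (!have_ism_v2())
		smcd &= ~V2;
	if (!have_rdma_v2())
		smcr &= ~V2;

	if (!smcd && !smcr) {
		puts("no SMC device: decline and fall back to TCP");
		return 1;
	}
	printf("proposing smcd=%#x smcr=%#x\n", smcd, smcr);
	return 0;
}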
+void smc_fill_gid_list(struct smc_link_group *lgr,
+ struct smc_gidlist *gidlist,
+ struct smc_ib_device *known_dev, u8 *known_gid)
+{
+ struct smc_init_info *alt_ini = NULL;
+
+ memset(gidlist, 0, sizeof(*gidlist));
+ memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);
+
+ alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
+ if (!alt_ini)
+ goto out;
+
+ alt_ini->vlan_id = lgr->vlan_id;
+ alt_ini->check_smcrv2 = true;
+ alt_ini->smcrv2.saddr = lgr->saddr;
+ smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);
+
+ if (!alt_ini->smcrv2.ib_dev_v2)
+ goto out;
+
+ memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
+ SMC_GID_SIZE);
+
+out:
+ kfree(alt_ini);
+}
+
+static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm *aclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+ struct smc_clc_first_contact_ext *fce =
+ (struct smc_clc_first_contact_ext *)
+ (((u8 *)clc_v2) + sizeof(*clc_v2));
+
+ if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
+ return 0;
+
+ if (fce->v2_direct) {
+ memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
+ ini->smcrv2.uses_gateway = false;
+ } else {
+ if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+ smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
+ ini->smcrv2.nexthop_mac,
+ &ini->smcrv2.uses_gateway))
+ return SMC_CLC_DECL_NOROUTE;
+ if (!ini->smcrv2.uses_gateway) {
+ /* mismatch: peer claims indirect, but it's direct */
+ return SMC_CLC_DECL_NOINDIRECT;
+ }
+ }
+ return 0;
+}
+ /* setup for RDMA connection of client */ static int smc_connect_rdma(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *aclc, @@@ -851,18 -759,11 +851,18 @@@ { int i, reason_code = 0; struct smc_link *link; + u8 *eid = NULL;
ini->is_smcd = false; - ini->ib_lcl = &aclc->r0.lcl; ini->ib_clcqpn = ntoh24(aclc->r0.qpn); ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK; + memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN); + memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE); + memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN); + + reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini); + if (reason_code) + return reason_code;
mutex_lock(&smc_client_lgr_pending); reason_code = smc_conn_create(smc, ini); @@@ -884,9 -785,8 +884,9 @@@ if (l->peer_qpn == ntoh24(aclc->r0.qpn) && !memcmp(l->peer_gid, &aclc->r0.lcl.gid, SMC_GID_SIZE) && - !memcmp(l->peer_mac, &aclc->r0.lcl.mac, - sizeof(l->peer_mac))) { + (aclc->hdr.version > SMC_V1 || + !memcmp(l->peer_mac, &aclc->r0.lcl.mac, + sizeof(l->peer_mac)))) { link = l; break; } @@@ -905,7 -805,7 +905,7 @@@ }
if (ini->first_contact_local) - smc_link_save_peer_info(link, aclc); + smc_link_save_peer_info(link, aclc, ini);
if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) { reason_code = SMC_CLC_DECL_ERR_RTOK; @@@ -928,18 -828,8 +928,18 @@@ } smc_rmb_sync_sg_for_device(&smc->conn);
+ if (aclc->hdr.version > SMC_V1) { + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)aclc; + + eid = clc_v2->r1.eid; + if (ini->first_contact_local) + smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist, + link->smcibdev, link->gid); + } + reason_code = smc_clc_send_confirm(smc, ini->first_contact_local, - SMC_V1); + aclc->hdr.version, eid, ini); if (reason_code) goto connect_abort;
@@@ -979,7 -869,7 +979,7 @@@ smc_v2_determine_accepted_chid(struct s int i;
for (i = 0; i < ini->ism_offered_cnt + 1; i++) { - if (ini->ism_chid[i] == ntohs(aclc->chid)) { + if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) { ini->ism_selected = i; return 0; } @@@ -993,7 -883,6 +993,7 @@@ static int smc_connect_ism(struct smc_s struct smc_clc_msg_accept_confirm *aclc, struct smc_init_info *ini) { + u8 *eid = NULL; int rc = 0;
ini->is_smcd = true; @@@ -1029,15 -918,8 +1029,15 @@@ smc_rx_init(smc); smc_tx_init(smc);
+ if (aclc->hdr.version > SMC_V1) { + struct smc_clc_msg_accept_confirm_v2 *clc_v2 = + (struct smc_clc_msg_accept_confirm_v2 *)aclc; + + eid = clc_v2->d1.eid; + } + rc = smc_clc_send_confirm(smc, ini->first_contact_local, - aclc->hdr.version); + aclc->hdr.version, eid, NULL); if (rc) goto connect_abort; mutex_unlock(&smc_server_lgr_pending); @@@ -1060,24 -942,17 +1060,24 @@@ connect_abort static int smc_connect_check_aclc(struct smc_init_info *ini, struct smc_clc_msg_accept_confirm *aclc) { - if ((aclc->hdr.typev1 == SMC_TYPE_R && - !smcr_indicated(ini->smc_type_v1)) || - (aclc->hdr.typev1 == SMC_TYPE_D && - ((!smcd_indicated(ini->smc_type_v1) && - !smcd_indicated(ini->smc_type_v2)) || - (aclc->hdr.version == SMC_V1 && - !smcd_indicated(ini->smc_type_v1)) || - (aclc->hdr.version == SMC_V2 && - !smcd_indicated(ini->smc_type_v2))))) + if (aclc->hdr.typev1 != SMC_TYPE_R && + aclc->hdr.typev1 != SMC_TYPE_D) return SMC_CLC_DECL_MODEUNSUPP;
+ if (aclc->hdr.version >= SMC_V2) { + if ((aclc->hdr.typev1 == SMC_TYPE_R && + !smcr_indicated(ini->smc_type_v2)) || + (aclc->hdr.typev1 == SMC_TYPE_D && + !smcd_indicated(ini->smc_type_v2))) + return SMC_CLC_DECL_MODEUNSUPP; + } else { + if ((aclc->hdr.typev1 == SMC_TYPE_R && + !smcr_indicated(ini->smc_type_v1)) || + (aclc->hdr.typev1 == SMC_TYPE_D && + !smcd_indicated(ini->smc_type_v1))) + return SMC_CLC_DECL_MODEUNSUPP; + } + return 0; }
@@@ -1108,15 -983,14 +1108,15 @@@ static int __smc_connect(struct smc_soc return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM, version);
- ini->smcd_version = SMC_V1; - ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0; + ini->smcd_version = SMC_V1 | SMC_V2; + ini->smcr_version = SMC_V1 | SMC_V2; ini->smc_type_v1 = SMC_TYPE_B; - ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N; + ini->smc_type_v2 = SMC_TYPE_B;
/* get vlan id from IP device */ if (smc_vlan_by_tcpsk(smc->clcsock, ini)) { ini->smcd_version &= ~SMC_V1; + ini->smcr_version = 0; ini->smc_type_v1 = SMC_TYPE_N; if (!ini->smcd_version) { rc = SMC_CLC_DECL_GETVLANERR; @@@ -1144,17 -1018,15 +1144,17 @@@ /* check if smc modes and versions of CLC proposal and accept match */ rc = smc_connect_check_aclc(ini, aclc); version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2; - ini->smcd_version = version; if (rc) goto vlan_cleanup;
/* depending on previous steps, connect using rdma or ism */ - if (aclc->hdr.typev1 == SMC_TYPE_R) + if (aclc->hdr.typev1 == SMC_TYPE_R) { + ini->smcr_version = version; rc = smc_connect_rdma(smc, aclc, ini); - else if (aclc->hdr.typev1 == SMC_TYPE_D) + } else if (aclc->hdr.typev1 == SMC_TYPE_D) { + ini->smcd_version = version; rc = smc_connect_ism(smc, aclc, ini); + } if (rc) goto vlan_cleanup;
@@@ -1185,7 -1057,7 +1185,7 @@@ static void smc_connect_work(struct wor if (smc->clcsock->sk->sk_err) { smc->sk.sk_err = smc->clcsock->sk->sk_err; } else if ((1 << smc->clcsock->sk->sk_state) & - (TCPF_SYN_SENT | TCP_SYN_RECV)) { + (TCPF_SYN_SENT | TCPF_SYN_RECV)) { rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo); if ((rc == -EPIPE) && ((1 << smc->clcsock->sk->sk_state) & @@@ -1435,7 -1307,7 +1435,7 @@@ static int smcr_serv_conf_first_link(st smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
/* initial contact - try to establish second link */ - smc_llc_srv_add_link(link); + smc_llc_srv_add_link(link, NULL); return 0; }
@@@ -1515,48 -1387,33 +1515,48 @@@ static int smc_listen_v2_check(struct s
ini->smc_type_v1 = pclc->hdr.typev1; ini->smc_type_v2 = pclc->hdr.typev2; - ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0; - if (pclc->hdr.version > SMC_V1) - ini->smcd_version |= - ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0; - if (!(ini->smcd_version & SMC_V2)) { + ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0; + ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0; + if (pclc->hdr.version > SMC_V1) { + if (smcd_indicated(ini->smc_type_v2)) + ini->smcd_version |= SMC_V2; + if (smcr_indicated(ini->smc_type_v2)) + ini->smcr_version |= SMC_V2; + } + if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) { rc = SMC_CLC_DECL_PEERNOSMC; goto out; } - if (!smc_ism_is_v2_capable()) { - ini->smcd_version &= ~SMC_V2; - rc = SMC_CLC_DECL_NOISM2SUPP; - goto out; - } pclc_v2_ext = smc_get_clc_v2_ext(pclc); if (!pclc_v2_ext) { ini->smcd_version &= ~SMC_V2; + ini->smcr_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOV2EXT; goto out; } pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext); - if (!pclc_smcd_v2_ext) { - ini->smcd_version &= ~SMC_V2; - rc = SMC_CLC_DECL_NOV2DEXT; + if (ini->smcd_version & SMC_V2) { + if (!smc_ism_is_v2_capable()) { + ini->smcd_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOISM2SUPP; + } else if (!pclc_smcd_v2_ext) { + ini->smcd_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOV2DEXT; + } else if (!pclc_v2_ext->hdr.eid_cnt && + !pclc_v2_ext->hdr.flag.seid) { + ini->smcd_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOUEID; + } + } + if (ini->smcr_version & SMC_V2) { + if (!pclc_v2_ext->hdr.eid_cnt) { + ini->smcr_version &= ~SMC_V2; + rc = SMC_CLC_DECL_NOUEID; + } }
out: - if (!ini->smcd_version) + if (!ini->smcd_version && !ini->smcr_version) return rc;
return 0; @@@ -1676,6 -1533,11 +1676,6 @@@ static void smc_find_ism_v2_device_serv pclc_smcd = smc_get_clc_msg_smcd(pclc); smc_v2_ext = smc_get_clc_v2_ext(pclc); smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext); - if (!smcd_v2_ext || - !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */ - smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini); - goto not_found; - }
mutex_lock(&smcd_dev_list.mutex); if (pclc_smcd->ism.chid) @@@ -1693,16 -1555,14 +1693,16 @@@ } mutex_unlock(&smcd_dev_list.mutex);
- if (ini->ism_dev[0]) { - smc_ism_get_system_eid(ini->ism_dev[0], &eid); - if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN)) - goto not_found; - } else { + if (!ini->ism_dev[0]) { + smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini); goto not_found; }
+ smc_ism_get_system_eid(&eid); + if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, + smcd_v2_ext->system_eid, eid)) + goto not_found; + /* separate - outside the smcd_dev_list.lock */ smcd_version = ini->smcd_version; for (i = 0; i < matches; i++) { @@@ -1719,7 -1579,6 +1719,7 @@@ } /* no V2 ISM device could be initialized */ ini->smcd_version = smcd_version; /* restore original value */ + ini->negotiated_eid[0] = 0;
not_found: ini->smcd_version &= ~SMC_V2; @@@ -1749,7 -1608,6 +1749,7 @@@ static void smc_find_ism_v1_device_serv
not_found: smc_find_ism_store_rc(rc, ini); + ini->smcd_version &= ~SMC_V1; ini->ism_dev[0] = NULL; ini->is_smcd = false; } @@@ -1768,69 -1626,24 +1768,69 @@@ static int smc_listen_rdma_reg(struct s return 0; }
+static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, + struct smc_clc_msg_proposal *pclc, + struct smc_init_info *ini) +{ + struct smc_clc_v2_extension *smc_v2_ext; + u8 smcr_version; + int rc; + + if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2)) + goto not_found; + + smc_v2_ext = smc_get_clc_v2_ext(pclc); + if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL)) + goto not_found; + + /* prepare RDMA check */ + memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN); + memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE); + memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN); + ini->check_smcrv2 = true; + ini->smcrv2.clc_sk = new_smc->clcsock->sk; + ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr; + ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce); + rc = smc_find_rdma_device(new_smc, ini); + if (rc) { + smc_find_ism_store_rc(rc, ini); + goto not_found; + } + if (!ini->smcrv2.uses_gateway) + memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN); + + smcr_version = ini->smcr_version; + ini->smcr_version = SMC_V2; + rc = smc_listen_rdma_init(new_smc, ini); + if (!rc) + rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local); + if (!rc) + return; + ini->smcr_version = smcr_version; + smc_find_ism_store_rc(rc, ini); + +not_found: + ini->smcr_version &= ~SMC_V2; + ini->check_smcrv2 = false; +} + static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { int rc;
- if (!smcr_indicated(ini->smc_type_v1)) + if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1)) return SMC_CLC_DECL_NOSMCDEV;
/* prepare RDMA check */ - ini->ib_lcl = &pclc->lcl; + memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN); + memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE); + memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN); rc = smc_find_rdma_device(new_smc, ini); if (rc) { /* no RDMA device found */ - if (ini->smc_type_v1 == SMC_TYPE_B) - /* neither ISM nor RDMA device found */ - rc = SMC_CLC_DECL_NOSMCDEV; - return rc; + return SMC_CLC_DECL_NOSMCDEV; } rc = smc_listen_rdma_init(new_smc, ini); if (rc) @@@ -1843,60 -1656,51 +1843,60 @@@ static int smc_listen_find_device(struc struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { - int rc; + int prfx_rc;
/* check for ISM device matching V2 proposed device */ smc_find_ism_v2_device_serv(new_smc, pclc, ini); if (ini->ism_dev[0]) return 0;
- if (!(ini->smcd_version & SMC_V1)) - return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV; - - /* check for matching IP prefix and subnet length */ - rc = smc_listen_prfx_check(new_smc, pclc); - if (rc) - return ini->rc ?: rc; + /* check for matching IP prefix and subnet length (V1) */ + prfx_rc = smc_listen_prfx_check(new_smc, pclc); + if (prfx_rc) + smc_find_ism_store_rc(prfx_rc, ini);
/* get vlan id from IP device */ if (smc_vlan_by_tcpsk(new_smc->clcsock, ini)) return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
/* check for ISM device matching V1 proposed device */ - smc_find_ism_v1_device_serv(new_smc, pclc, ini); + if (!prfx_rc) + smc_find_ism_v1_device_serv(new_smc, pclc, ini); if (ini->ism_dev[0]) return 0;
- if (pclc->hdr.typev1 == SMC_TYPE_D) + if (!smcr_indicated(pclc->hdr.typev1) && + !smcr_indicated(pclc->hdr.typev2)) /* skip RDMA and decline */ return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
- /* check if RDMA is available */ - rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini); - smc_find_ism_store_rc(rc, ini); + /* check if RDMA V2 is available */ + smc_find_rdma_v2_device_serv(new_smc, pclc, ini); + if (ini->smcrv2.ib_dev_v2) + return 0;
- return (!rc) ? 0 : ini->rc; + /* check if RDMA V1 is available */ + if (!prfx_rc) { + int rc; + + rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini); + smc_find_ism_store_rc(rc, ini); + return (!rc) ? 0 : ini->rc; + } + return SMC_CLC_DECL_NOSMCDEV; }
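smc_listen_find_device() now probes in a fixed preference order (ISM v2, then the v1 prefix and vlan checks, ISM v1, RDMA v2, RDMA v1) and uses smc_find_ism_store_rc() so that a decline carries the first failure reason rather than the last. The store-first-reason pattern in isolation (stub probes, hypothetical reason codes):

#include <stdio.h>

/* keep only the first nonzero reason, like smc_find_ism_store_rc() */
static void store_rc(int rc, int *saved_rc)
{
	if (rc && !*saved_rc)
		*saved_rc = rc;
}

/* stand-in probes returning 0 on success or a decline reason */
static int try_ism_v2(void)  { return -1; }
static int try_ism_v1(void)  { return -2; }
static int try_rdma_v2(void) { return -3; }
static int try_rdma_v1(void) { return -4; }

int main(void)
{
	int (*probe[])(void) = { try_ism_v2, try_ism_v1,
				 try_rdma_v2, try_rdma_v1 };
	int saved_rc = 0;

	for (unsigned i = 0; i < sizeof(probe) / sizeof(probe[0]); i++) {
		int rc = probe[i]();

		if (!rc)
			return 0;	/* device found, use it */
		store_rc(rc, &saved_rc);
	}
	printf("decline, first reason: %d\n", saved_rc);	/* -1 */
	return 1;
}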
/* listen worker: finish RDMA setup */ static int smc_listen_rdma_finish(struct smc_sock *new_smc, struct smc_clc_msg_accept_confirm *cclc, - bool local_first) + bool local_first, + struct smc_init_info *ini) { struct smc_link *link = new_smc->conn.lnk; int reason_code = 0;
if (local_first) - smc_link_save_peer_info(link, cclc); + smc_link_save_peer_info(link, cclc, ini);
if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc)) return SMC_CLC_DECL_ERR_RTOK; @@@ -1917,13 -1721,12 +1917,13 @@@ static void smc_listen_work(struct work { struct smc_sock *new_smc = container_of(work, struct smc_sock, smc_listen_work); - u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1; struct socket *newclcsock = new_smc->clcsock; struct smc_clc_msg_accept_confirm *cclc; struct smc_clc_msg_proposal_area *buf; struct smc_clc_msg_proposal *pclc; struct smc_init_info *ini = NULL; + u8 proposal_version = SMC_V1; + u8 accept_version; int rc = 0;
if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) @@@ -1954,9 -1757,7 +1954,9 @@@ SMC_CLC_PROPOSAL, CLC_WAIT_TIME); if (rc) goto out_decl; - version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version; + + if (pclc->hdr.version > SMC_V1) + proposal_version = SMC_V2;
/* IPSec connections opt out of SMC optimizations */ if (using_ipsec(new_smc)) { @@@ -1986,9 -1787,8 +1986,9 @@@ goto out_unlock;
/* send SMC Accept CLC message */ + accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version; rc = smc_clc_send_accept(new_smc, ini->first_contact_local, - ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1); + accept_version, ini->negotiated_eid); if (rc) goto out_unlock;
@@@ -2010,7 -1810,7 +2010,7 @@@ /* finish worker */ if (!ini->is_smcd) { rc = smc_listen_rdma_finish(new_smc, cclc, - ini->first_contact_local); + ini->first_contact_local, ini); if (rc) goto out_unlock; mutex_unlock(&smc_server_lgr_pending); @@@ -2024,7 -1824,7 +2024,7 @@@ out_unlock mutex_unlock(&smc_server_lgr_pending); out_decl: smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0, - version); + proposal_version); out_free: kfree(ini); kfree(buf); @@@ -2862,7 -2662,6 +2862,7 @@@ static void __exit smc_exit(void proto_unregister(&smc_proto); smc_pnet_exit(); smc_nl_exit(); + smc_clc_exit(); unregister_pernet_subsys(&smc_net_stat_ops); unregister_pernet_subsys(&smc_net_ops); rcu_barrier(); diff --combined net/smc/smc_llc.c index a9623c952007,f1d323439a2a..b102680296b8 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@@ -23,24 -23,16 +23,24 @@@
struct smc_llc_hdr { struct smc_wr_rx_hdr common; - u8 length; /* 44 */ -#if defined(__BIG_ENDIAN_BITFIELD) - u8 reserved:4, - add_link_rej_rsn:4; + union { + struct { + u8 length; /* 44 */ + #if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:4, + add_link_rej_rsn:4; #elif defined(__LITTLE_ENDIAN_BITFIELD) - u8 add_link_rej_rsn:4, - reserved:4; + u8 add_link_rej_rsn:4, + reserved:4; #endif + }; + u16 length_v2; /* 44 - 8192*/ + }; u8 flags; -}; +} __packed; /* format defined in + * IBM Shared Memory Communications Version 2 + * (https://www.ibm.com/support/pages/node/6326337) + */
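The reworked smc_llc_hdr above overlays the v1 one-byte length (fixed at 44) with a 16-bit length_v2 in a packed union, so the same header can describe the larger v2 messages. Stripped of the rejection-reason bitfield, the idea looks like this (simplified; the real on-wire layout is defined by the SMC v2 spec linked above):

#include <stdint.h>
#include <stddef.h>

struct llc_hdr {
	uint8_t type;
	uint8_t version;		/* 0 for v1, 2 for v2 */
	union {
		uint8_t  length;	/* v1: always 44 */
		uint16_t length_v2;	/* v2: up to 8192 */
	};
	uint8_t flags;
} __attribute__((packed));

/* compare smc_llc_init_msg_hdr() later in this patch */
static void init_hdr(struct llc_hdr *hdr, int version, size_t len)
{
	hdr->version = version;
	if (version >= 2)
		hdr->length_v2 = (uint16_t)len;
	else
		hdr->length = (uint8_t)len;
}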
#define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03
@@@ -84,32 -76,6 +84,32 @@@ struct smc_llc_msg_add_link_cont_rt __be64 rmb_vaddr_new; };
+struct smc_llc_msg_add_link_v2_ext { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 v2_direct : 1, + reserved : 7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u8 reserved : 7, + v2_direct : 1; +#endif + u8 reserved2; + u8 client_target_gid[SMC_GID_SIZE]; + u8 reserved3[8]; + u16 num_rkeys; + struct smc_llc_msg_add_link_cont_rt rt[]; +} __packed; /* format defined in + * IBM Shared Memory Communications Version 2 + * (https://www.ibm.com/support/pages/node/6326337) + */ + +struct smc_llc_msg_req_add_link_v2 { + struct smc_llc_hdr hd; + u8 reserved[20]; + u8 gid_cnt; + u8 reserved2[3]; + u8 gid[][SMC_GID_SIZE]; +}; + #define SMC_LLC_RKEYS_PER_CONT_MSG 2
struct smc_llc_msg_add_link_cont { /* type 0x03 */ @@@ -148,8 -114,7 +148,8 @@@ struct smc_rmb_rtoken __be64 rmb_vaddr; } __packed; /* format defined in RFC7609 */
-#define SMC_LLC_RKEYS_PER_MSG 3 +#define SMC_LLC_RKEYS_PER_MSG 3 +#define SMC_LLC_RKEYS_PER_MSG_V2 255
struct smc_llc_msg_confirm_rkey { /* type 0x06 */ struct smc_llc_hdr hd; @@@ -170,18 -135,9 +170,18 @@@ struct smc_llc_msg_delete_rkey { /* typ u8 reserved2[4]; };
+struct smc_llc_msg_delete_rkey_v2 { /* type 0x29 */ + struct smc_llc_hdr hd; + u8 num_rkeys; + u8 num_inval_rkeys; + u8 reserved[2]; + __be32 rkey[]; +}; + union smc_llc_msg { struct smc_llc_msg_confirm_link confirm_link; struct smc_llc_msg_add_link add_link; + struct smc_llc_msg_req_add_link_v2 req_add_link; struct smc_llc_msg_add_link_cont add_link_cont; struct smc_llc_msg_del_link delete_link;
@@@ -233,7 -189,7 +233,7 @@@ static inline void smc_llc_flow_qentry_ static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type, struct smc_llc_qentry *qentry) { - u8 msg_type = qentry->msg.raw.hdr.common.type; + u8 msg_type = qentry->msg.raw.hdr.common.llc_type;
if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) && flow_type != msg_type && !lgr->delayed_event) { @@@ -263,7 -219,7 +263,7 @@@ static bool smc_llc_flow_start(struct s spin_unlock_bh(&lgr->llc_flow_lock); return false; } - switch (qentry->msg.raw.hdr.common.type) { + switch (qentry->msg.raw.hdr.common.llc_type) { case SMC_LLC_ADD_LINK: flow->type = SMC_LLC_FLOW_ADD_LINK; break; @@@ -350,7 -306,7 +350,7 @@@ struct smc_llc_qentry *smc_llc_wait(str smc_llc_flow_qentry_del(flow); goto out; } - rcv_msg = flow->qentry->msg.raw.hdr.common.type; + rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type; if (exp_msg && rcv_msg != exp_msg) { if (exp_msg == SMC_LLC_ADD_LINK && rcv_msg == SMC_LLC_DELETE_LINK) { @@@ -418,30 -374,6 +418,30 @@@ static int smc_llc_add_pending_send(str return 0; }
+static int smc_llc_add_pending_send_v2(struct smc_link *link, + struct smc_wr_v2_buf **wr_buf, + struct smc_wr_tx_pend_priv **pend) +{ + int rc; + + rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend); + if (rc < 0) + return rc; + return 0; +} + +static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr, + struct smc_link_group *lgr, size_t len) +{ + if (lgr->smc_version == SMC_V2) { + hdr->common.llc_version = SMC_V2; + hdr->length_v2 = len; + } else { + hdr->common.llc_version = 0; + hdr->length = len; + } +} + /* high-level API to send LLC confirm link */ int smc_llc_send_confirm_link(struct smc_link *link, enum smc_llc_reqresp reqresp) @@@ -458,8 -390,8 +458,8 @@@ goto put_out; confllc = (struct smc_llc_msg_confirm_link *)wr_buf; memset(confllc, 0, sizeof(*confllc)); - confllc->hd.common.type = SMC_LLC_CONFIRM_LINK; - confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link); + confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK; + smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc)); confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC; if (reqresp == SMC_LLC_RESP) confllc->hd.flags |= SMC_LLC_FLAG_RESP; @@@ -494,8 -426,8 +494,8 @@@ static int smc_llc_send_confirm_rkey(st goto put_out; rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf; memset(rkeyllc, 0, sizeof(*rkeyllc)); - rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY; - rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey); + rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY; + smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));
rtok_ix = 1; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { @@@ -539,8 -471,8 +539,8 @@@ static int smc_llc_send_delete_rkey(str goto put_out; rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf; memset(rkeyllc, 0, sizeof(*rkeyllc)); - rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY; - rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey); + rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY; + smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc)); rkeyllc->num_rkeys = 1; rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey); /* send llc message */ @@@ -550,116 -482,26 +550,116 @@@ put_out return rc; }
+/* return first buffer from any of the next buf lists */ +static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr, + int *buf_lst) +{ + struct smc_buf_desc *buf_pos; + + while (*buf_lst < SMC_RMBE_SIZES) { + buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst], + struct smc_buf_desc, list); + if (buf_pos) + return buf_pos; + (*buf_lst)++; + } + return NULL; +} + +/* return next rmb from buffer lists */ +static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, + int *buf_lst, + struct smc_buf_desc *buf_pos) +{ + struct smc_buf_desc *buf_next; + + if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { + (*buf_lst)++; + return _smc_llc_get_next_rmb(lgr, buf_lst); + } + buf_next = list_next_entry(buf_pos, list); + return buf_next; +} + +static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr, + int *buf_lst) +{ + *buf_lst = 0; + return smc_llc_get_next_rmb(lgr, buf_lst, NULL); +} + +static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext, + struct smc_link *link, struct smc_link *link_new) +{ + struct smc_link_group *lgr = link->lgr; + struct smc_buf_desc *buf_pos; + int prim_lnk_idx, lnk_idx, i; + struct smc_buf_desc *rmb; + int len = sizeof(*ext); + int buf_lst; + + ext->v2_direct = !lgr->uses_gateway; + memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE); + + prim_lnk_idx = link->link_idx; + lnk_idx = link_new->link_idx; + mutex_lock(&lgr->rmbs_lock); + ext->num_rkeys = lgr->conns_num; + if (!ext->num_rkeys) + goto out; + buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); + for (i = 0; i < ext->num_rkeys; i++) { + if (!buf_pos) + break; + rmb = buf_pos; + ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey); + ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey); + ext->rt[i].rmb_vaddr_new = + cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); + buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); + while (buf_pos && !(buf_pos)->used) + buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); + } + len += i * sizeof(ext->rt[0]); +out: + mutex_unlock(&lgr->rmbs_lock); + return len; +} + /* send ADD LINK request or response */ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], struct smc_link *link_new, enum smc_llc_reqresp reqresp) { + struct smc_llc_msg_add_link_v2_ext *ext = NULL; struct smc_llc_msg_add_link *addllc; struct smc_wr_tx_pend_priv *pend; - struct smc_wr_buf *wr_buf; + int len = sizeof(*addllc); int rc;
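The rmb-walk helpers moved in above (_smc_llc_get_next_rmb() and friends) flatten an array of per-size buffer lists into one sequence, which lets smc_llc_fill_ext_v2() emit up to num_rkeys rkeys in a single pass. The same walk over a toy array-of-arrays, with 0 as the end-of-list sentinel (illustrative only):

#include <stdio.h>

#define NLISTS 3

static int lists[NLISTS][4] = {
	{ 0 },			/* empty list */
	{ 10, 11, 0 },
	{ 20, 0 },
};

/* advance (*lst, *idx) across lists and return the next element */
static int *next_entry(int *lst, int *idx)
{
	while (*lst < NLISTS) {
		int v = lists[*lst][*idx];

		if (v) {
			(*idx)++;
			return &lists[*lst][*idx - 1];
		}
		(*lst)++;	/* current list exhausted: try the next one */
		*idx = 0;
	}
	return NULL;		/* all lists consumed */
}

int main(void)
{
	int lst = 0, idx = 0;
	int *p;

	while ((p = next_entry(&lst, &idx)))
		printf("%d\n", *p);	/* prints 10, 11, 20 */
	return 0;
}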
if (!smc_wr_tx_link_hold(link)) return -ENOLINK; - rc = smc_llc_add_pending_send(link, &wr_buf, &pend); - if (rc) - goto put_out; - addllc = (struct smc_llc_msg_add_link *)wr_buf; + if (link->lgr->smc_version == SMC_V2) { + struct smc_wr_v2_buf *wr_buf; + + rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend); + if (rc) + goto put_out; + addllc = (struct smc_llc_msg_add_link *)wr_buf; + ext = (struct smc_llc_msg_add_link_v2_ext *) + &wr_buf->raw[sizeof(*addllc)]; + memset(ext, 0, SMC_WR_TX_SIZE); + } else { + struct smc_wr_buf *wr_buf; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + goto put_out; + addllc = (struct smc_llc_msg_add_link *)wr_buf; + }
memset(addllc, 0, sizeof(*addllc)); - addllc->hd.common.type = SMC_LLC_ADD_LINK; - addllc->hd.length = sizeof(struct smc_llc_msg_add_link); + addllc->hd.common.llc_type = SMC_LLC_ADD_LINK; if (reqresp == SMC_LLC_RESP) addllc->hd.flags |= SMC_LLC_FLAG_RESP; memcpy(addllc->sender_mac, mac, ETH_ALEN); @@@ -674,14 -516,8 +674,14 @@@ addllc->qp_mtu = min(link_new->path_mtu, link_new->peer_mtu); } + if (ext && link_new) + len += smc_llc_fill_ext_v2(ext, link, link_new); + smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len); /* send llc message */ - rc = smc_wr_tx_send(link, pend); + if (link->lgr->smc_version == SMC_V2) + rc = smc_wr_tx_v2_send(link, pend, len); + else + rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; @@@ -705,8 -541,8 +705,8 @@@ int smc_llc_send_delete_link(struct smc delllc = (struct smc_llc_msg_del_link *)wr_buf;
memset(delllc, 0, sizeof(*delllc)); - delllc->hd.common.type = SMC_LLC_DELETE_LINK; - delllc->hd.length = sizeof(struct smc_llc_msg_del_link); + delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK; + smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc)); if (reqresp == SMC_LLC_RESP) delllc->hd.flags |= SMC_LLC_FLAG_RESP; if (orderly) @@@ -738,8 -574,8 +738,8 @@@ static int smc_llc_send_test_link(struc goto put_out; testllc = (struct smc_llc_msg_test_link *)wr_buf; memset(testllc, 0, sizeof(*testllc)); - testllc->hd.common.type = SMC_LLC_TEST_LINK; - testllc->hd.length = sizeof(struct smc_llc_msg_test_link); + testllc->hd.common.llc_type = SMC_LLC_TEST_LINK; + smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc)); memcpy(testllc->user_data, user_data, sizeof(testllc->user_data)); /* send llc message */ rc = smc_wr_tx_send(link, pend); @@@ -815,6 -651,44 +815,6 @@@ static int smc_llc_alloc_alt_link(struc return -EMLINK; }
-/* return first buffer from any of the next buf lists */ -static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr, - int *buf_lst) -{ - struct smc_buf_desc *buf_pos; - - while (*buf_lst < SMC_RMBE_SIZES) { - buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst], - struct smc_buf_desc, list); - if (buf_pos) - return buf_pos; - (*buf_lst)++; - } - return NULL; -} - -/* return next rmb from buffer lists */ -static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, - int *buf_lst, - struct smc_buf_desc *buf_pos) -{ - struct smc_buf_desc *buf_next; - - if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { - (*buf_lst)++; - return _smc_llc_get_next_rmb(lgr, buf_lst); - } - buf_next = list_next_entry(buf_pos, list); - return buf_next; -} - -static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr, - int *buf_lst) -{ - *buf_lst = 0; - return smc_llc_get_next_rmb(lgr, buf_lst, NULL); -} - /* send one add_link_continue msg */ static int smc_llc_add_link_cont(struct smc_link *link, struct smc_link *link_new, u8 *num_rkeys_todo, @@@ -860,7 -734,7 +860,7 @@@ while (*buf_pos && !(*buf_pos)->used) *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); } - addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT; + addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT; addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); if (lgr->role == SMC_CLNT) addc_llc->hd.flags |= SMC_LLC_FLAG_RESP; @@@ -919,8 -793,6 +919,8 @@@ static int smc_llc_cli_add_link_reject( qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ; qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH; + smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr, + sizeof(qentry->msg)); return smc_llc_send_message(qentry->link, &qentry->msg); }
@@@ -941,7 -813,7 +941,7 @@@ static int smc_llc_cli_conf_link(struc SMC_LLC_DEL_LOST_PATH); return -ENOLINK; } - if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { + if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) { /* received DELETE_LINK instead */ qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, &qentry->msg); @@@ -982,26 -854,6 +982,26 @@@ return 0; }
+static void smc_llc_save_add_link_rkeys(struct smc_link *link, + struct smc_link *link_new) +{ + struct smc_llc_msg_add_link_v2_ext *ext; + struct smc_link_group *lgr = link->lgr; + int max, i; + + ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 + + SMC_WR_TX_SIZE); + max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); + mutex_lock(&lgr->rmbs_lock); + for (i = 0; i < max; i++) { + smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, + ext->rt[i].rmb_key, + ext->rt[i].rmb_vaddr_new, + ext->rt[i].rmb_key_new); + } + mutex_unlock(&lgr->rmbs_lock); +} + static void smc_llc_save_add_link_info(struct smc_link *link, struct smc_llc_msg_add_link *add_llc) { @@@ -1018,47 -870,31 +1018,47 @@@ int smc_llc_cli_add_link(struct smc_lin struct smc_llc_msg_add_link *llc = &qentry->msg.add_link; enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; struct smc_link_group *lgr = smc_get_lgr(link); + struct smc_init_info *ini = NULL; struct smc_link *lnk_new = NULL; - struct smc_init_info ini; int lnk_idx, rc = 0;
if (!llc->qp_mtu) goto out_reject;
- ini.vlan_id = lgr->vlan_id; - smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); + ini = kzalloc(sizeof(*ini), GFP_KERNEL); + if (!ini) { + rc = -ENOMEM; + goto out_reject; + } + + ini->vlan_id = lgr->vlan_id; + if (lgr->smc_version == SMC_V2) { + ini->check_smcrv2 = true; + ini->smcrv2.saddr = lgr->saddr; + ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid); + } + smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && - !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) { - if (!ini.ib_dev) + (lgr->smc_version == SMC_V2 || + !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) { + if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2) goto out_reject; lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; } - if (!ini.ib_dev) { + if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; - ini.ib_dev = link->smcibdev; - ini.ib_port = link->ibport; + ini->smcrv2.ib_dev_v2 = link->smcibdev; + ini->smcrv2.ib_port_v2 = link->ibport; + } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) { + lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; + ini->ib_dev = link->smcibdev; + ini->ib_port = link->ibport; } lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); if (lnk_idx < 0) goto out_reject; lnk_new = &lgr->lnk[lnk_idx]; - rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini); + rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini); if (rc) goto out_reject; smc_llc_save_add_link_info(lnk_new, llc); @@@ -1074,20 -910,16 +1074,20 @@@ goto out_clear_lnk;
rc = smc_llc_send_add_link(link, - lnk_new->smcibdev->mac[ini.ib_port - 1], + lnk_new->smcibdev->mac[lnk_new->ibport - 1], lnk_new->gid, lnk_new, SMC_LLC_RESP); if (rc) goto out_clear_lnk; - rc = smc_llc_cli_rkey_exchange(link, lnk_new); - if (rc) { - rc = 0; - goto out_clear_lnk; + if (lgr->smc_version == SMC_V2) { + smc_llc_save_add_link_rkeys(link, lnk_new); + } else { + rc = smc_llc_cli_rkey_exchange(link, lnk_new); + if (rc) { + rc = 0; + goto out_clear_lnk; + } } - rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t); + rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t); if (!rc) goto out; out_clear_lnk: @@@ -1096,78 -928,29 +1096,78 @@@ out_reject: smc_llc_cli_add_link_reject(qentry); out: + kfree(ini); kfree(qentry); return rc; }
+static void smc_llc_send_request_add_link(struct smc_link *link) +{ + struct smc_llc_msg_req_add_link_v2 *llc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_v2_buf *wr_buf; + struct smc_gidlist gidlist; + int rc, len, i; + + if (!smc_wr_tx_link_hold(link)) + return; + if (link->lgr->type == SMC_LGR_SYMMETRIC || + link->lgr->type == SMC_LGR_ASYMMETRIC_PEER) + goto put_out; + + smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid); + if (gidlist.len <= 1) + goto put_out; + + rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend); + if (rc) + goto put_out; + llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf; + memset(llc, 0, SMC_WR_TX_SIZE); + + llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK; + for (i = 0; i < gidlist.len; i++) + memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0])); + llc->gid_cnt = gidlist.len; + len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0])); + smc_llc_init_msg_hdr(&llc->hd, link->lgr, len); + rc = smc_wr_tx_v2_send(link, pend, len); + if (!rc) + /* set REQ_ADD_LINK flow and wait for response from peer */ + link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK; +put_out: + smc_wr_tx_link_put(link); +} + /* as an SMC client, invite server to start the add_link processing */ static void smc_llc_cli_add_link_invite(struct smc_link *link, struct smc_llc_qentry *qentry) { struct smc_link_group *lgr = smc_get_lgr(link); - struct smc_init_info ini; + struct smc_init_info *ini = NULL; + + if (lgr->smc_version == SMC_V2) { + smc_llc_send_request_add_link(link); + goto out; + }
if (lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER) goto out;
- ini.vlan_id = lgr->vlan_id; - smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); - if (!ini.ib_dev) + ini = kzalloc(sizeof(*ini), GFP_KERNEL); + if (!ini) + goto out; + + ini->vlan_id = lgr->vlan_id; + smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); + if (!ini->ib_dev) goto out;
- smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1], - ini.ib_gid, NULL, SMC_LLC_REQ); + smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1], + ini->ib_gid, NULL, SMC_LLC_REQ); out: + kfree(ini); kfree(qentry); }
@@@ -1183,7 -966,7 +1183,7 @@@ static bool smc_llc_is_empty_llc_messag
static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) { - if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK && + if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK && smc_llc_is_empty_llc_message(llc)) return true; return false; @@@ -1350,7 -1133,7 +1350,7 @@@ static int smc_llc_srv_conf_link(struc /* receive CONFIRM LINK response over the RoCE fabric */ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0); if (!qentry || - qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { + qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) { /* send DELETE LINK */ smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); @@@ -1369,80 -1152,37 +1369,80 @@@ return 0; }
-int smc_llc_srv_add_link(struct smc_link *link) +static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry) +{ + qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; + smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr, + sizeof(qentry->msg)); + memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data)); + smc_llc_send_message(qentry->link, &qentry->msg); +} + +int smc_llc_srv_add_link(struct smc_link *link, + struct smc_llc_qentry *req_qentry) { enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; struct smc_link_group *lgr = link->lgr; struct smc_llc_msg_add_link *add_llc; struct smc_llc_qentry *qentry = NULL; - struct smc_link *link_new; - struct smc_init_info ini; + bool send_req_add_link_resp = false; + struct smc_link *link_new = NULL; + struct smc_init_info *ini = NULL; int lnk_idx, rc = 0;
+ if (req_qentry && + req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK) + send_req_add_link_resp = true; + + ini = kzalloc(sizeof(*ini), GFP_KERNEL); + if (!ini) { + rc = -ENOMEM; + goto out; + } + /* ignore client add link recommendation, start new flow */ - ini.vlan_id = lgr->vlan_id; - smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); - if (!ini.ib_dev) { + ini->vlan_id = lgr->vlan_id; + if (lgr->smc_version == SMC_V2) { + ini->check_smcrv2 = true; + ini->smcrv2.saddr = lgr->saddr; + if (send_req_add_link_resp) { + struct smc_llc_msg_req_add_link_v2 *req_add = + &req_qentry->msg.req_add_link; + + ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]); + } + } + smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); + if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) { + lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; + ini->smcrv2.ib_dev_v2 = link->smcibdev; + ini->smcrv2.ib_port_v2 = link->ibport; + } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; - ini.ib_dev = link->smcibdev; - ini.ib_port = link->ibport; + ini->ib_dev = link->smcibdev; + ini->ib_port = link->ibport; } lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); - if (lnk_idx < 0) - return 0; + if (lnk_idx < 0) { + rc = 0; + goto out; + }
- rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini); + rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini); if (rc) - return rc; + goto out; link_new = &lgr->lnk[lnk_idx]; + + rc = smcr_buf_map_lgr(link_new); + if (rc) + goto out_err; + rc = smc_llc_send_add_link(link, - link_new->smcibdev->mac[ini.ib_port - 1], + link_new->smcibdev->mac[link_new->ibport-1], link_new->gid, link_new, SMC_LLC_REQ); if (rc) goto out_err; + send_req_add_link_resp = false; /* receive ADD LINK response over the RoCE fabric */ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK); if (!qentry) { @@@ -1457,59 -1197,48 +1457,59 @@@ } if (lgr->type == SMC_LGR_SINGLE && (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && - !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) { + (lgr->smc_version == SMC_V2 || + !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) { lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; } smc_llc_save_add_link_info(link_new, add_llc); smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
rc = smc_ib_ready_link(link_new); - if (rc) - goto out_err; - rc = smcr_buf_map_lgr(link_new); if (rc) goto out_err; rc = smcr_buf_reg_lgr(link_new); if (rc) goto out_err; - rc = smc_llc_srv_rkey_exchange(link, link_new); - if (rc) - goto out_err; + if (lgr->smc_version == SMC_V2) { + smc_llc_save_add_link_rkeys(link, link_new); + } else { + rc = smc_llc_srv_rkey_exchange(link, link_new); + if (rc) + goto out_err; + } rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); if (rc) goto out_err; + kfree(ini); return 0; out_err: - link_new->state = SMC_LNK_INACTIVE; - smcr_link_clear(link_new, false); + if (link_new) { + link_new->state = SMC_LNK_INACTIVE; + smcr_link_clear(link_new, false); + } +out: + kfree(ini); + if (send_req_add_link_resp) + smc_llc_send_req_add_link_response(req_qentry); return rc; }
static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) { struct smc_link *link = lgr->llc_flow_lcl.qentry->link; + struct smc_llc_qentry *qentry; int rc;
- smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
mutex_lock(&lgr->llc_conf_mutex); - rc = smc_llc_srv_add_link(link); + rc = smc_llc_srv_add_link(link, qentry); if (!rc && lgr->type == SMC_LGR_SYMMETRIC) { /* delete any asymmetric link */ smc_llc_delete_asym_link(lgr); } mutex_unlock(&lgr->llc_conf_mutex); + kfree(qentry); }
/* enqueue a local add_link req to trigger a new add_link flow */ @@@ -1517,8 -1246,8 +1517,8 @@@ void smc_llc_add_link_local(struct smc_ { struct smc_llc_msg_add_link add_llc = {};
- add_llc.hd.length = sizeof(add_llc); - add_llc.hd.common.type = SMC_LLC_ADD_LINK; + add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK; + smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc)); /* no dev and port needed */ smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); } @@@ -1540,8 -1269,7 +1540,8 @@@ static void smc_llc_add_link_work(struc else smc_llc_process_srv_add_link(lgr); out: - smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); + if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK) + smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); }
/* enqueue a local del_link msg to trigger a new del_link flow, @@@ -1551,8 -1279,8 +1551,8 @@@ void smc_llc_srv_delete_link_local(stru { struct smc_llc_msg_del_link del_llc = {};
- del_llc.hd.length = sizeof(del_llc); - del_llc.hd.common.type = SMC_LLC_DELETE_LINK; + del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK; + smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc)); del_llc.link_num = del_link_id; del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH); del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; @@@ -1622,8 -1350,8 +1622,8 @@@ void smc_llc_send_link_delete_all(struc struct smc_llc_msg_del_link delllc = {}; int i;
- delllc.hd.common.type = SMC_LLC_DELETE_LINK; - delllc.hd.length = sizeof(delllc); + delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK; + smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc)); if (ord) delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; @@@ -1739,8 -1467,6 +1739,8 @@@ static void smc_llc_rmt_conf_rkey(struc link = qentry->link;
 	num_entries = llc->rtoken[0].num_rkeys;
+	if (num_entries > SMC_LLC_RKEYS_PER_MSG)
+		goto out_err;
 	/* first rkey entry is for receiving link */
 	rk_idx = smc_rtoken_add(link,
 				llc->rtoken[0].rmb_vaddr,
@@@ -1759,7 -1485,6 +1759,7 @@@ out_err
 	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
 out:
 	llc->hd.flags |= SMC_LLC_FLAG_RESP;
+	smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
 	smc_llc_send_message(link, &qentry->msg);
 	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
 }
@@@ -1777,28 -1502,6 +1777,28 @@@ static void smc_llc_rmt_delete_rkey(str
 	llc = &qentry->msg.delete_rkey;
 	link = qentry->link;
 
+	if (lgr->smc_version == SMC_V2) {
+		struct smc_llc_msg_delete_rkey_v2 *llcv2;
+
+		memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
+		llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
+		llcv2->num_inval_rkeys = 0;
+
+		max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+		for (i = 0; i < max; i++) {
+			if (smc_rtoken_delete(link, llcv2->rkey[i]))
+				llcv2->num_inval_rkeys++;
+		}
+		memset(&llc->rkey[0], 0, sizeof(llc->rkey));
+		memset(&llc->reserved2, 0, sizeof(llc->reserved2));
+		smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
+		if (llcv2->num_inval_rkeys) {
+			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
+			llc->err_mask = llcv2->num_inval_rkeys;
+		}
+		goto finish;
+	}
+
 	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
 	for (i = 0; i < max; i++) {
 		if (smc_rtoken_delete(link, llc->rkey[i]))
@@@ -1808,7 -1511,6 +1808,7 @@@
 		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
 		llc->err_mask = err_mask;
 	}
+finish:
 	llc->hd.flags |= SMC_LLC_FLAG_RESP;
 	smc_llc_send_message(link, &qentry->msg);
 	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
@@@ -1844,7 -1546,7 +1844,7 @@@ static void smc_llc_event_handler(struc
 	if (!smc_link_usable(link))
 		goto out;
 
-	switch (llc->raw.hdr.common.type) {
+	switch (llc->raw.hdr.common.llc_type) {
 	case SMC_LLC_TEST_LINK:
 		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
 		smc_llc_send_message(link, llc);
@@@ -1869,18 -1571,8 +1869,18 @@@
 				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
 							qentry);
 				wake_up(&lgr->llc_msg_waiter);
-			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
-						      qentry)) {
+				return;
+			}
+			if (lgr->llc_flow_lcl.type ==
+			    SMC_LLC_FLOW_REQ_ADD_LINK) {
+				/* server started add_link processing */
+				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
+				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
+							qentry);
+				schedule_work(&lgr->llc_add_link_work);
+				return;
+			}
+			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
 				schedule_work(&lgr->llc_add_link_work);
 			}
 		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
@@@ -1928,23 -1620,6 +1928,23 @@@
 			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
 		}
 		return;
+	case SMC_LLC_REQ_ADD_LINK:
+		/* handle response here, smc_llc_flow_stop() cannot be called
+		 * in tasklet context
+		 */
+		if (lgr->role == SMC_CLNT &&
+		    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
+		    (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
+			smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
+		} else if (lgr->role == SMC_SERV) {
+			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
+				/* as smc server, handle client suggestion */
+				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
+				schedule_work(&lgr->llc_add_link_work);
+			}
+			return;
+		}
+		break;
 	default:
 		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
 		break;
@@@ -1988,7 -1663,7 +1988,7 @@@ static void smc_llc_rx_response(struct 
 {
 	enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
 	struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
-	u8 llc_type = qentry->msg.raw.hdr.common.type;
+	u8 llc_type = qentry->msg.raw.hdr.common.llc_type;
 
 	switch (llc_type) {
 	case SMC_LLC_TEST_LINK:
@@@ -2014,8 -1689,7 +2014,8 @@@
 		/* not used because max links is 3 */
 		break;
 	default:
-		smc_llc_protocol_violation(link->lgr, llc_type);
+		smc_llc_protocol_violation(link->lgr,
+					   qentry->msg.raw.hdr.common.type);
 		break;
 	}
 	kfree(qentry);
@@@ -2040,8 -1714,7 +2040,8 @@@ static void smc_llc_enqueue(struct smc_
 	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
 
 	/* process responses immediately */
-	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
+	if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
+	    llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
 		smc_llc_rx_response(link, qentry);
 		return;
 	}
@@@ -2061,13 -1734,8 +2061,13 @@@ static void smc_llc_rx_handler(struct i
 
 	if (wc->byte_len < sizeof(*llc))
 		return; /* short message */
-	if (llc->raw.hdr.length != sizeof(*llc))
-		return; /* invalid message */
+	if (!llc->raw.hdr.common.llc_version) {
+		if (llc->raw.hdr.length != sizeof(*llc))
+			return; /* invalid message */
+	} else {
+		if (llc->raw.hdr.length_v2 < sizeof(*llc))
+			return; /* invalid message */
+	}
 
 	smc_llc_enqueue(link, llc);
 }
@@@ -2154,7 -1822,7 +2154,7 @@@ void smc_llc_link_active(struct smc_lin
 			    link->smcibdev->ibdev->name, link->ibport);
 	link->state = SMC_LNK_ACTIVE;
 	if (link->lgr->llc_testlink_time) {
-		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
+		link->llc_testlink_time = link->lgr->llc_testlink_time;
 		schedule_delayed_work(&link->llc_testlink_wrk,
 				      link->llc_testlink_time);
 	}
@@@ -2286,35 -1954,6 +2286,35 @@@ static struct smc_wr_rx_handler smc_llc
 		.handler	= smc_llc_rx_handler,
 		.type		= SMC_LLC_DELETE_RKEY
 	},
+	/* V2 types */
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_CONFIRM_LINK_V2
+	},
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_TEST_LINK_V2
+	},
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_ADD_LINK_V2
+	},
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_DELETE_LINK_V2
+	},
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_REQ_ADD_LINK_V2
+	},
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_CONFIRM_RKEY_V2
+	},
+	{
+		.handler	= smc_llc_rx_handler,
+		.type		= SMC_LLC_DELETE_RKEY_V2
+	},
 	{
 		.handler	= NULL,
 	}
diff --combined net/tls/tls_main.c
index 278192ee133e,9ab81db8a654..acfba9f1ba72
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@@ -421,88 -421,6 +421,88 @@@ static int do_tls_getsockopt_conf(struc
 			rc = -EFAULT;
 		break;
 	}
+	case TLS_CIPHER_AES_CCM_128: {
+		struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
+			container_of(crypto_info,
+				     struct tls12_crypto_info_aes_ccm_128, info);
+
+		if (len != sizeof(*aes_ccm_128)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		lock_sock(sk);
+		memcpy(aes_ccm_128->iv,
+		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
+		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+		release_sock(sk);
+		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+			rc = -EFAULT;
+		break;
+	}
+	case TLS_CIPHER_CHACHA20_POLY1305: {
+		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
+			container_of(crypto_info,
+				     struct tls12_crypto_info_chacha20_poly1305,
+				     info);
+
+		if (len != sizeof(*chacha20_poly1305)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		lock_sock(sk);
+		memcpy(chacha20_poly1305->iv,
+		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+		release_sock(sk);
+		if (copy_to_user(optval, chacha20_poly1305,
+				 sizeof(*chacha20_poly1305)))
+			rc = -EFAULT;
+		break;
+	}
+	case TLS_CIPHER_SM4_GCM: {
+		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+			container_of(crypto_info,
+				     struct tls12_crypto_info_sm4_gcm, info);
+
+		if (len != sizeof(*sm4_gcm_info)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		lock_sock(sk);
+		memcpy(sm4_gcm_info->iv,
+		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+		       TLS_CIPHER_SM4_GCM_IV_SIZE);
+		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+		release_sock(sk);
+		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+			rc = -EFAULT;
+		break;
+	}
+	case TLS_CIPHER_SM4_CCM: {
+		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+			container_of(crypto_info,
+				     struct tls12_crypto_info_sm4_ccm, info);
+
+		if (len != sizeof(*sm4_ccm_info)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		lock_sock(sk);
+		memcpy(sm4_ccm_info->iv,
+		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+		       TLS_CIPHER_SM4_CCM_IV_SIZE);
+		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+		release_sock(sk);
+		if (copy_to_user(optval, sm4_ccm_info,
+				 sizeof(*sm4_ccm_info)))
+			rc = -EFAULT;
+		break;
+	}
 	default:
 		rc = -EINVAL;
 	}
@@@ -606,12 -524,6 +606,12 @@@ static int do_tls_setsockopt_conf(struc
 	case TLS_CIPHER_CHACHA20_POLY1305:
 		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
 		break;
+	case TLS_CIPHER_SM4_GCM:
+		optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+		break;
+	case TLS_CIPHER_SM4_CCM:
+		optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+		break;
 	default:
 		rc = -EINVAL;
 		goto err_crypto_info;
@@@ -769,12 -681,12 +769,12 @@@ static void build_protos(struct proto p
 
 	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
 	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
-	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+	prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
 	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
 
 	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
 	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
-	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+	prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
 	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
 
 #ifdef CONFIG_TLS_DEVICE
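
For context, the new SM4 cipher suites are driven from user space through the established kTLS setsockopt() interface; the do_tls_setsockopt_conf() hunk above only teaches the optlen validation about the two new structs. A minimal illustrative sketch follows (it assumes a completed TLS 1.2 handshake that negotiated SM4-GCM and a kernel carrying these patches; the fallback defines cover older userspace headers, and error handling is trimmed):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <linux/tls.h>

	#ifndef SOL_TLS
	#define SOL_TLS 282
	#endif
	#ifndef TCP_ULP
	#define TCP_ULP 31
	#endif

	static int enable_sm4_gcm_tx(int fd, const unsigned char *key,
				     const unsigned char *iv,
				     const unsigned char *salt,
				     const unsigned char *rec_seq)
	{
		struct tls12_crypto_info_sm4_gcm ci = {
			.info.version = TLS_1_2_VERSION,
			.info.cipher_type = TLS_CIPHER_SM4_GCM,
		};

		memcpy(ci.key, key, TLS_CIPHER_SM4_GCM_KEY_SIZE);
		memcpy(ci.iv, iv, TLS_CIPHER_SM4_GCM_IV_SIZE);
		memcpy(ci.salt, salt, TLS_CIPHER_SM4_GCM_SALT_SIZE);
		memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);

		/* attach the TLS ULP, then hand the key material to the
		 * kernel; optlen must match the SM4-GCM struct exactly */
		if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
			return -1;
		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
	}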
diff --combined net/tls/tls_sw.c
index 4147bb2e7057,1b08b877a890..d81564078557
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -35,6 -35,7 +35,7 @@@
  * SOFTWARE.
  */
 
+#include <linux/bug.h>
 #include <linux/sched/signal.h>
 #include <linux/module.h>
 #include <linux/splice.h>
@@@ -43,6 -44,14 +44,14 @@@
 #include <net/strparser.h>
 #include <net/tls.h>
 
+noinline void tls_err_abort(struct sock *sk, int err)
+{
+	WARN_ON_ONCE(err >= 0);
+	/* sk->sk_err should contain a positive error code. */
+	sk->sk_err = -err;
+	sk_error_report(sk);
+}
+
 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
 		     unsigned int recursion_level)
 {
@@@ -419,7 -428,7 +428,7 @@@ int tls_tx_records(struct sock *sk, in
 tx_err:
 	if (rc < 0 && rc != -EAGAIN)
-		tls_err_abort(sk, EBADMSG);
+		tls_err_abort(sk, -EBADMSG);
 
 	return rc;
 }
@@@ -450,7 -459,7 +459,7 @@@ static void tls_encrypt_done(struct cry
 		/* If err is already set on socket, return the same code */
 		if (sk->sk_err) {
-			ctx->async_wait.err = sk->sk_err;
+			ctx->async_wait.err = -sk->sk_err;
 		} else {
 			ctx->async_wait.err = err;
 			tls_err_abort(sk, err);
@@@ -498,15 -507,9 +507,15 @@@ static int tls_do_encryption(struct soc
 	int rc, iv_offset = 0;
 
 	/* For CCM based ciphers, first byte of IV is a constant */
-	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+	switch (prot->cipher_type) {
+	case TLS_CIPHER_AES_CCM_128:
 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
 		iv_offset = 1;
+		break;
+	case TLS_CIPHER_SM4_CCM:
+		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+		iv_offset = 1;
+		break;
 	}
 
 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
@@@ -769,7 -772,7 +778,7 @@@ static int tls_push_record(struct sock 
 			       msg_pl->sg.size + prot->tail_size, i);
 	if (rc < 0) {
 		if (rc != -EINPROGRESS) {
-			tls_err_abort(sk, EBADMSG);
+			tls_err_abort(sk, -EBADMSG);
 			if (split) {
 				tls_ctx->pending_open_record_frags = true;
 				tls_merge_open_record(sk, rec, tmp, orig_end);
@@@ -1463,16 -1466,10 +1472,16 @@@ static int decrypt_internal(struct soc
 	aad = (u8 *)(sgout + n_sgout);
 	iv = aad + prot->aad_size;
 
-	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
-	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
-		iv[0] = 2;
+	/* For CCM based ciphers, first byte of nonce+iv is a constant */
+	switch (prot->cipher_type) {
+	case TLS_CIPHER_AES_CCM_128:
+		iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+		iv_offset = 1;
+		break;
+	case TLS_CIPHER_SM4_CCM:
+		iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
 		iv_offset = 1;
+		break;
 	}
 
 	/* Prepare IV */
@@@ -1839,7 -1836,7 +1848,7 @@@ int tls_sw_recvmsg(struct sock *sk
 			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
 						 &chunk, &zc, async_capable);
 			if (err < 0 && err != -EINPROGRESS) {
-				tls_err_abort(sk, EBADMSG);
+				tls_err_abort(sk, -EBADMSG);
 				goto recv_end;
 			}
@@@ -2019,7 -2016,7 +2028,7 @@@ ssize_t tls_sw_splice_read(struct socke
 		}
 
 		if (err < 0) {
-			tls_err_abort(sk, EBADMSG);
+			tls_err_abort(sk, -EBADMSG);
 			goto splice_read_end;
 		}
 		ctx->decrypted = 1;
@@@ -2038,7 -2035,7 +2047,7 @@@ splice_read_end
 	return copied ? : err;
 }
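
All of the tls_err_abort() call-site changes in this file follow the sign convention set by the new noinline definition near the top: callers pass a negative errno, the WARN_ON_ONCE(err >= 0) catches anyone still passing a positive value, and the helper stores the conventional positive code in sk->sk_err. In short:

	tls_err_abort(sk, -EBADMSG);	/* sk->sk_err ends up as EBADMSG */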
 
-bool tls_sw_stream_read(const struct sock *sk)
+bool tls_sw_sock_is_readable(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -2436,40 -2433,6 +2445,40 @@@ int tls_set_sw_offload(struct sock *sk
 		cipher_name = "rfc7539(chacha20,poly1305)";
 		break;
 	}
+	case TLS_CIPHER_SM4_GCM: {
+		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+		sm4_gcm_info = (void *)crypto_info;
+		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+		iv = sm4_gcm_info->iv;
+		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+		rec_seq = sm4_gcm_info->rec_seq;
+		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+		key = sm4_gcm_info->key;
+		salt = sm4_gcm_info->salt;
+		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+		cipher_name = "gcm(sm4)";
+		break;
+	}
+	case TLS_CIPHER_SM4_CCM: {
+		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+		sm4_ccm_info = (void *)crypto_info;
+		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+		iv = sm4_ccm_info->iv;
+		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+		rec_seq = sm4_ccm_info->rec_seq;
+		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+		key = sm4_ccm_info->key;
+		salt = sm4_ccm_info->salt;
+		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+		cipher_name = "ccm(sm4)";
+		break;
+	}
 	default:
 		rc = -EINVAL;
 		goto free_priv;
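
tls_sw_sock_is_readable() above plugs into the renamed ->sock_is_readable proto hook wired up in build_protos() in tls_main.c. For reference, the generic consumer side is a small helper along these lines (a sketch, assuming the sk_is_readable() form that accompanies the rename in include/net/sock.h):

	static inline bool sk_is_readable(struct sock *sk)
	{
		if (sk->sk_prot->sock_is_readable)
			return sk->sk_prot->sock_is_readable(sk);
		return false;
	}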
diff --combined net/wireless/core.c
index 45be124a98f1,aaba847d79eb..eb297e1015e0
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@@ -524,6 -524,7 +524,7 @@@ use_default_name
 	INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
 	INIT_WORK(&rdev->mgmt_registrations_update_wk,
 		  cfg80211_mgmt_registrations_update_wk);
+	spin_lock_init(&rdev->mgmt_registrations_lock);
 
 #ifdef CONFIG_CFG80211_DEFAULT_PS
 	rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@@ -1080,16 -1081,6 +1081,16 @@@ void cfg80211_dev_free(struct cfg80211_
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&rdev->wiphy, &scan->pub);
 	mutex_destroy(&rdev->wiphy.mtx);
+
+	/*
+	 * The 'regd' can only be non-NULL if we never finished
+	 * initializing the wiphy and thus never went through the
+	 * unregister path - e.g. in failure scenarios. Thus, it
+	 * cannot have been visible to anyone if non-NULL, so we
+	 * can just free it here.
+	 */
+	kfree(rcu_dereference_raw(rdev->wiphy.regd));
+
 	kfree(rdev);
 }
@@@ -1289,7 -1280,6 +1290,6 @@@ void cfg80211_init_wdev(struct wireless
 	INIT_LIST_HEAD(&wdev->event_list);
 	spin_lock_init(&wdev->event_lock);
 	INIT_LIST_HEAD(&wdev->mgmt_registrations);
-	spin_lock_init(&wdev->mgmt_registrations_lock);
 	INIT_LIST_HEAD(&wdev->pmsr_list);
 	spin_lock_init(&wdev->pmsr_lock);
 	INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
diff --combined net/wireless/scan.c
index e4f79b23f7f6,adc0d14cfd86..22e92be61938
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@@ -383,7 -383,7 +383,7 @@@ static bool is_bss(struct cfg80211_bss 
 		   const u8 *ssid, size_t ssid_len)
 {
 	const struct cfg80211_bss_ies *ies;
-	const u8 *ssidie;
+	const struct element *ssid_elem;
 
 	if (bssid && !ether_addr_equal(a->bssid, bssid))
 		return false;
@@@ -394,12 -394,12 +394,12 @@@
 	ies = rcu_access_pointer(a->ies);
 	if (!ies)
 		return false;
-	ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
-	if (!ssidie)
+	ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
+	if (!ssid_elem)
 		return false;
-	if (ssidie[1] != ssid_len)
+	if (ssid_elem->datalen != ssid_len)
 		return false;
-	return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+	return memcmp(ssid_elem->data, ssid, ssid_len) == 0;
 }
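
The cfg80211_find_ie() to cfg80211_find_elem() conversion in is_bss() replaces manual pointer arithmetic (ssidie[1] for the length octet, ssidie + 2 for the payload) with the length-checked struct element view. Typical use, with hypothetical ssid/ies buffers:

	const struct element *elem;

	elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
	if (elem && elem->datalen <= IEEE80211_MAX_SSID_LEN)
		memcpy(ssid, elem->data, elem->datalen);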
 
 static int
@@@ -418,14 -418,17 +418,17 @@@ cfg80211_add_nontrans_list(struct cfg80
 	}
 	ssid_len = ssid[1];
 	ssid = ssid + 2;
-	rcu_read_unlock();
 
 	/* check if nontrans_bss is in the list */
 	list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
-		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+			rcu_read_unlock();
 			return 0;
+		}
 	}
 
+	rcu_read_unlock();
+
 	/* add to the list */
 	list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
 	return 0;
@@@ -1791,13 -1794,25 +1794,13 @@@ cfg80211_bss_update(struct cfg80211_reg
 		return NULL;
 }
 
-/*
- * Update RX channel information based on the available frame payload
- * information. This is mainly for the 2.4 GHz band where frames can be received
- * from neighboring channels and the Beacon frames use the DSSS Parameter Set
- * element to indicate the current (transmitting) channel, but this might also
- * be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
- */
-static struct ieee80211_channel *
-cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-			 struct ieee80211_channel *channel,
-			 enum nl80211_bss_scan_width scan_width)
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+				    enum nl80211_band band)
 {
 	const u8 *tmp;
-	u32 freq;
 	int channel_number = -1;
-	struct ieee80211_channel *alt_channel;
 
-	if (channel->band == NL80211_BAND_S1GHZ) {
+	if (band == NL80211_BAND_S1GHZ) {
 		tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
 		if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
 			struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
@@@ -1818,29 -1833,6 +1821,29 @@@
 		}
 	}
 
+	return channel_number;
+}
+EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
+
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
+static struct ieee80211_channel *
+cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+			 struct ieee80211_channel *channel,
+			 enum nl80211_bss_scan_width scan_width)
+{
+	u32 freq;
+	int channel_number;
+	struct ieee80211_channel *alt_channel;
+
+	channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+
 	if (channel_number < 0) {
 		/* No channel information in frame payload */
 		return channel;
@@@ -2083,12 -2075,12 +2086,12 @@@ static void cfg80211_parse_mbssid_data(
 	if (!non_tx_data)
 		return;
-	if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+	if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
 		return;
 	if (!wiphy->support_mbssid)
 		return;
 	if (wiphy->support_only_he_mbssid &&
-	    !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
 		return;
 
 	new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
@@@ -2455,10 -2447,10 +2458,10 @@@ cfg80211_inform_bss_frame_data(struct w
 	res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
 						    len, gfp);
 	if (!res || !wiphy->support_mbssid ||
-	    !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+	    !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
 		return res;
 	if (wiphy->support_only_he_mbssid &&
-	    !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
 		return res;
 
 	non_tx_data.tx_bss = res;
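
With cfg80211_get_ies_channel_number() now exported, a driver can reuse the payload-based channel lookup that cfg80211_get_bss_channel() performs internally. A hypothetical driver RX-path sketch (variable names assumed for illustration):

	int chan;

	/* prefer the channel advertised in the frame body (e.g. the DSSS
	 * Parameter Set on 2.4 GHz) over the radio's tuned channel */
	chan = cfg80211_get_ies_channel_number(ies, ielen, NL80211_BAND_2GHZ);
	if (chan >= 0)
		rx_freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);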
diff --combined net/wireless/util.c
index 2991f711491a,a1a99a574984..5ff1f8726faf
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -80,7 -80,6 +80,7 @@@ u32 ieee80211_channel_to_freq_khz(int c
 		return 0; /* not supported */
 	switch (band) {
 	case NL80211_BAND_2GHZ:
+	case NL80211_BAND_LC:
 		if (chan == 14)
 			return MHZ_TO_KHZ(2484);
 		else if (chan < 14)
@@@ -210,7 -209,6 +210,7 @@@ static void set_mandatory_flags_band(st
 		WARN_ON(want);
 		break;
 	case NL80211_BAND_2GHZ:
+	case NL80211_BAND_LC:
 		want = 7;
 		for (i = 0; i < sband->n_bitrates; i++) {
 			switch (sband->bitrates[i].bitrate) {
@@@ -1030,14 -1028,14 +1030,14 @@@ int cfg80211_change_iface(struct cfg802
 	    !(rdev->wiphy.interface_modes & (1 << ntype)))
 		return -EOPNOTSUPP;
 
-	/* if it's part of a bridge, reject changing type to station/ibss */
-	if (netif_is_bridge_port(dev) &&
-	    (ntype == NL80211_IFTYPE_ADHOC ||
-	     ntype == NL80211_IFTYPE_STATION ||
-	     ntype == NL80211_IFTYPE_P2P_CLIENT))
-		return -EBUSY;
-
 	if (ntype != otype) {
+		/* if it's part of a bridge, reject changing type to station/ibss */
+		if (netif_is_bridge_port(dev) &&
+		    (ntype == NL80211_IFTYPE_ADHOC ||
+		     ntype == NL80211_IFTYPE_STATION ||
+		     ntype == NL80211_IFTYPE_P2P_CLIENT))
+			return -EBUSY;
+
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;
 		wdev_lock(dev->ieee80211_ptr);