From 12c3407883588983ad9f25c4d80e781b4e51093d Mon Sep 17 00:00:00 2001 From: Tal Cohen Date: Sun, 26 Jan 2025 15:51:14 +0200 Subject: [PATCH 1/2] net: hbl_cn: create common cfg lock function For upstreaming purposes and to make the code more robust, create a common cfg lock function that invokes the ASIC-specific port callback. Signed-off-by: Tal Cohen --- .../net/ethernet/intel/hbl_cn/common/hbl_cn.c | 185 +++++++++--------- .../net/ethernet/intel/hbl_cn/common/hbl_cn.h | 3 + .../ethernet/intel/hbl_cn/common/hbl_cn_qp.c | 6 +- .../ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c | 19 +- .../intel/hbl_cn/gaudi2/gaudi2_cn_debugfs.c | 8 +- 5 files changed, 110 insertions(+), 111 deletions(-) diff --git a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c index ef86af2998ca6..87c23b5c4f175 100644 --- a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c +++ b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c @@ -516,7 +516,7 @@ static int hbl_cn_update_mtu(struct hbl_aux_dev *aux_dev, u32 port, u32 mtu) port_funcs = hdev->asic_funcs->port_funcs; mtu += HBL_EN_MAX_HEADERS_SZ; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->qp_ids, qp_id, qp) { if (qp->mtu_type == MTU_FROM_NETDEV && qp->mtu != mtu) { rc = port_funcs->update_qp_mtu(cn_port, qp, mtu); @@ -527,7 +527,7 @@ static int hbl_cn_update_mtu(struct hbl_aux_dev *aux_dev, u32 port, u32 mtu) } } } - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -543,9 +543,9 @@ static int hbl_cn_qpc_write(struct hbl_aux_dev *aux_dev, u32 port, void *qpc, cn_port = &hdev->cn_ports[port]; port_funcs = hdev->asic_funcs->port_funcs; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); rc = port_funcs->qpc_write(cn_port, qpc, qpc_mask, qpn, is_req); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -895,17 +895,15 @@ static char *hbl_cn_ib_qp_syndrome_to_str(struct hbl_aux_dev *aux_dev, u32 syndr static int hbl_cn_ib_verify_qp_id(struct hbl_aux_dev *aux_dev, u32 qp_id, u32 port) { - struct hbl_cn_asic_port_funcs *port_funcs; struct hbl_cn_port *cn_port; struct hbl_cn_device *hdev; struct hbl_cn_qp *qp; int rc = 0; hdev = hbl_cn_aux2nic(aux_dev); - port_funcs = hdev->asic_funcs->port_funcs; cn_port = &hdev->cn_ports[port]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); qp = xa_load(&cn_port->qp_ids, qp_id); if (IS_ERR_OR_NULL(qp)) { @@ -923,7 +921,7 @@ static int hbl_cn_ib_verify_qp_id(struct hbl_aux_dev *aux_dev, u32 qp_id, u32 po } cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -1668,6 +1666,33 @@ void hbl_cn_hard_reset_prepare(struct hbl_aux_dev *cn_aux_dev, bool fw_reset, bo __hbl_cn_hard_reset_prepare(hdev, fw_reset, in_teardown); } +void hbl_cn_cfg_lock(struct hbl_cn_port *cn_port) +{ + struct hbl_cn_device *hdev = cn_port->hdev; + struct hbl_cn_asic_port_funcs *port_funcs; + + port_funcs = hdev->asic_funcs->port_funcs; + port_funcs->cfg_lock(cn_port); +} + +void hbl_cn_cfg_unlock(struct hbl_cn_port *cn_port) +{ + struct hbl_cn_device *hdev = cn_port->hdev; + struct hbl_cn_asic_port_funcs *port_funcs; + + port_funcs = hdev->asic_funcs->port_funcs; + port_funcs->cfg_unlock(cn_port); +} + +bool hbl_cn_cfg_is_locked(struct hbl_cn_port *cn_port) +{ + struct hbl_cn_device *hdev = cn_port->hdev; + struct hbl_cn_asic_port_funcs *port_funcs; + + port_funcs = hdev->asic_funcs->port_funcs; + return port_funcs->cfg_is_locked(cn_port); +} + int 
hbl_cn_send_port_cpucp_status(struct hbl_aux_dev *aux_dev, u32 port, u8 cmd, u8 period) { struct hbl_cn_device *hdev = aux_dev->priv; @@ -2023,7 +2048,7 @@ static int alloc_qp(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, hbl_cn_get_qp_id_range(cn_port, &min_id, &max_id); - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->set_app_params) { dev_dbg(hdev->dev, @@ -2065,7 +2090,7 @@ static int alloc_qp(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, atomic_inc(&cn_port->num_of_allocated_qps); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); out->conn_id = id; @@ -2074,7 +2099,7 @@ static int alloc_qp(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, qp_register_error: xa_erase(&qp->cn_port->qp_ids, qp->qp_id); error_exit: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); kfree(qp); return rc; } @@ -2189,8 +2214,6 @@ static int set_req_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_req_conn_ct { struct hbl_cn_wq_array_properties *swq_arr_props, *rwq_arr_props; struct hbl_cn_encap_xarray_pdata *encap_data; - struct hbl_cn_asic_port_funcs *port_funcs; - struct hbl_cn_asic_funcs *asic_funcs; u32 wq_size, port, max_wq_size; struct hbl_cn_port *cn_port; struct hbl_cn_qp *qp; @@ -2211,8 +2234,6 @@ static int set_req_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_req_conn_ct if (rc) return rc; - asic_funcs = hdev->asic_funcs; - port_funcs = asic_funcs->port_funcs; cn_port = &hdev->cn_ports[port]; wq_size = in->wq_size; @@ -2225,7 +2246,7 @@ static int set_req_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_req_conn_ct return -EINVAL; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); qp = xa_load(&cn_port->qp_ids, in->conn_id); if (IS_ERR_OR_NULL(qp)) { @@ -2287,7 +2308,7 @@ static int set_req_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_req_conn_ct if (rc) goto err_free_rwq; - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return 0; @@ -2318,7 +2339,7 @@ static int set_req_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_req_conn_ct } } cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -2326,8 +2347,6 @@ static int set_req_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_req_conn_ct static int set_res_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_res_conn_ctx_in *in) { struct hbl_cn_encap_xarray_pdata *encap_data; - struct hbl_cn_asic_port_funcs *port_funcs; - struct hbl_cn_asic_funcs *asic_funcs; struct hbl_cn_port *cn_port; struct hbl_cn_qp *qp; u32 port; @@ -2339,11 +2358,9 @@ static int set_res_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_res_conn_ct if (rc) return rc; - asic_funcs = hdev->asic_funcs; - port_funcs = asic_funcs->port_funcs; cn_port = &hdev->cn_ports[port]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); qp = xa_load(&cn_port->qp_ids, in->conn_id); if (IS_ERR_OR_NULL(qp)) { @@ -2389,12 +2406,12 @@ static int set_res_qp_ctx(struct hbl_cn_device *hdev, struct hbl_cni_res_conn_ct /* all is well, we are ready to receive */ rc = hbl_cn_qp_modify(cn_port, qp, CN_QP_STATE_RTR, in); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; unlock_cfg: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -2443,7 +2460,7 @@ static void qp_destroy_work(struct work_struct *work) if (qp->curr_state == CN_QP_STATE_SQD) hbl_cn_qp_modify(cn_port, qp, CN_QP_STATE_SQD, &drain_attr); - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); hbl_cn_qp_modify(cn_port, qp, 
CN_QP_STATE_RESET, &rst_attr); @@ -2495,7 +2512,7 @@ static void qp_destroy_work(struct work_struct *work) * Lock is to avoid concurrent memory access from a new handle created before freeing * memory. */ - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); kfree(qp); } @@ -2560,7 +2577,7 @@ static int destroy_qp(struct hbl_cn_device *hdev, struct hbl_cni_destroy_conn_in port_funcs = asic_funcs->port_funcs; /* prevent reentrancy by locking the whole process of destroy_qp */ - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); qp = xa_load(&cn_port->qp_ids, in->conn_id); if (IS_ERR_OR_NULL(qp)) { @@ -2570,24 +2587,23 @@ static int destroy_qp(struct hbl_cn_device *hdev, struct hbl_cni_destroy_conn_in hbl_cn_qp_do_release(qp); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return 0; out_err: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } static void hbl_cn_qps_stop(struct hbl_cn_port *cn_port) { - struct hbl_cn_asic_port_funcs *port_funcs = cn_port->hdev->asic_funcs->port_funcs; struct hbl_cn_qpc_drain_attr drain = { .wait_for_idle = false, }; unsigned long qp_id = 0; struct hbl_cn_qp *qp; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->qp_ids, qp_id, qp) { if (IS_ERR_OR_NULL(qp)) @@ -2596,7 +2612,7 @@ static void hbl_cn_qps_stop(struct hbl_cn_port *cn_port) hbl_cn_qp_modify(cn_port, qp, CN_QP_STATE_QPD, (void *)&drain); } - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); } static void qps_stop(struct hbl_cn_device *hdev) @@ -2696,7 +2712,7 @@ static int user_wq_arr_set(struct hbl_cn_device *hdev, struct hbl_cni_user_wq_ar return -EINVAL; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->set_app_params) { dev_dbg(hdev->dev, @@ -2761,7 +2777,7 @@ static int user_wq_arr_set(struct hbl_cn_device *hdev, struct hbl_cni_user_wq_ar wq_arr_props->enabled = true; out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -2800,14 +2816,12 @@ static int user_wq_arr_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_wq_ struct hbl_cn_ctx *ctx) { struct hbl_cn_wq_array_properties *wq_arr_props; - struct hbl_cn_asic_port_funcs *port_funcs; struct hbl_cn_properties *cn_props; struct hbl_cn_port *cn_port; u32 port, type; char *type_str; int rc; - port_funcs = hdev->asic_funcs->port_funcs; cn_props = &hdev->cn_props; type = in->type; @@ -2832,7 +2846,7 @@ static int user_wq_arr_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_wq_ wq_arr_props = &cn_port->wq_arr_props[type]; type_str = wq_arr_props->type_str; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!wq_arr_props->enabled) { dev_dbg(hdev->dev, "%s WQ array is disabled, port %d\n", type_str, port); @@ -2855,7 +2869,7 @@ static int user_wq_arr_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_wq_ rc = __user_wq_arr_unset(ctx, cn_port, type); out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -2894,7 +2908,7 @@ static int alloc_user_cq_id(struct hbl_cn_device *hdev, struct hbl_cni_alloc_use port_funcs->get_cq_id_range(cn_port, &min_id, &max_id); - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->set_app_params) { dev_dbg(hdev->dev, @@ -2915,7 +2929,7 @@ static int alloc_user_cq_id(struct hbl_cn_device *hdev, struct hbl_cni_alloc_use mutex_init(&user_cq->overrun_lock); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); dev_dbg(hdev->dev, "Allocating CQ id %d in port %d", id, port); @@ 
-2924,7 +2938,7 @@ static int alloc_user_cq_id(struct hbl_cn_device *hdev, struct hbl_cni_alloc_use return 0; cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); kfree(user_cq); return rc; @@ -2992,7 +3006,7 @@ static int __user_cq_set(struct hbl_cn_device *hdev, struct hbl_cni_user_cq_set_ return -EINVAL; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); /* Validate if user CQ is allocated. */ user_cq = xa_load(&cn_port->cq_ids, id); @@ -3018,7 +3032,7 @@ static int __user_cq_set(struct hbl_cn_device *hdev, struct hbl_cni_user_cq_set_ user_cq->state = USER_CQ_STATE_SET; out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -3118,7 +3132,6 @@ static int user_cq_unset_locked(struct hbl_cn_user_cq *user_cq, bool warn_if_ali static int __user_cq_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_cq_unset_in_params *in) { - struct hbl_cn_asic_port_funcs *port_funcs = hdev->asic_funcs->port_funcs; struct hbl_cn_properties *cn_props = &hdev->cn_props; struct hbl_cn_user_cq *user_cq; struct hbl_cn_port *cn_port; @@ -3152,7 +3165,7 @@ static int __user_cq_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_cq_un return -EINVAL; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); /* Validate if user CQ is allocated. */ user_cq = xa_load(&cn_port->cq_ids, id); @@ -3164,7 +3177,7 @@ static int __user_cq_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_cq_un rc = user_cq_unset_locked(user_cq, false); out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -3205,7 +3218,7 @@ static int user_set_app_params(struct hbl_cn_device *hdev, * will first obtain rtnl_lock and then will try to take a cfg_lock, hence a deadlock. */ rtnl_lock(); - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); rc = asic_funcs->user_set_app_params(hdev, in, &modify_wqe_checkers, ctx); if (rc) @@ -3223,7 +3236,7 @@ static int user_set_app_params(struct hbl_cn_device *hdev, cn_port->set_app_params = true; out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); rtnl_unlock(); return rc; @@ -3234,13 +3247,10 @@ static int user_get_app_params(struct hbl_cn_device *hdev, struct hbl_cni_get_user_app_params_out *out) { struct hbl_cn_asic_funcs *asic_funcs = hdev->asic_funcs; - struct hbl_cn_asic_port_funcs *port_funcs; struct hbl_cn_port *cn_port; u32 port; int rc; - port_funcs = asic_funcs->port_funcs; - port = in->port; rc = hbl_cn_cmd_port_check(hdev, port, NIC_PORT_CHECK_OPEN | NIC_PORT_PRINT_ON_ERR); @@ -3249,9 +3259,9 @@ static int user_get_app_params(struct hbl_cn_device *hdev, cn_port = &hdev->cn_ports[port]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); asic_funcs->user_get_app_params(hdev, in, out); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return 0; } @@ -3322,7 +3332,6 @@ static int alloc_user_db_fifo(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx struct hbl_cni_alloc_user_db_fifo_out *out) { struct hbl_cn_db_fifo_xarray_pdata *xa_pdata; - struct hbl_cn_asic_port_funcs *port_funcs; struct hbl_cn_port *cn_port; struct xa_limit id_limit; u32 min_id, max_id; @@ -3335,7 +3344,6 @@ static int alloc_user_db_fifo(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx return rc; cn_port = &hdev->cn_ports[port]; - port_funcs = hdev->asic_funcs->port_funcs; get_user_db_fifo_id_range(cn_port, &min_id, &max_id, in->id_hint); @@ -3348,7 +3356,7 @@ static int alloc_user_db_fifo(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx xa_pdata->state = 
DB_FIFO_STATE_ALLOC; xa_pdata->port = port; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->set_app_params) { dev_dbg(hdev->dev, @@ -3367,14 +3375,14 @@ static int alloc_user_db_fifo(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx xa_pdata->id = id; - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); out->id = id; return 0; cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); kfree(xa_pdata); return rc; } @@ -3480,7 +3488,7 @@ static int user_db_fifo_set(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, /* Get allocated ID private data. Having meta data associated with IDR also helps validate * that user do not trick kernel into configuring db fifo HW for an unallocated ID. */ - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_pdata = xa_load(&cn_port->db_fifo_ids, id); if (!xa_pdata) { dev_dbg_ratelimited(hdev->dev, "DB FIFO ID %d is not allocated, port: %d\n", id, @@ -3566,7 +3574,7 @@ static int user_db_fifo_set(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, out->fifo_size = xa_pdata->fifo_size; out->fifo_bp_thresh = xa_pdata->fifo_size / 2; - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return 0; @@ -3576,7 +3584,7 @@ static int user_db_fifo_set(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, free_db_fifo: port_funcs->db_fifo_free(cn_port, xa_pdata->db_pool_addr, xa_pdata->fifo_size); cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -3605,7 +3613,6 @@ static int __user_db_fifo_unset(struct hbl_cn_port *cn_port, static int user_db_fifo_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_db_fifo_unset_in *in) { struct hbl_cn_db_fifo_xarray_pdata *xa_pdata; - struct hbl_cn_asic_port_funcs *port_funcs; struct hbl_cn_port *cn_port; int rc; u32 id; @@ -3615,14 +3622,13 @@ static int user_db_fifo_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_db return rc; cn_port = &hdev->cn_ports[in->port]; - port_funcs = hdev->asic_funcs->port_funcs; id = in->id; rc = validate_db_fifo_ioctl(cn_port, id); if (rc) return rc; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_pdata = xa_load(&cn_port->db_fifo_ids, id); if (!xa_pdata) { @@ -3634,7 +3640,7 @@ static int user_db_fifo_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_db rc = __user_db_fifo_unset(cn_port, xa_pdata); out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -3667,7 +3673,7 @@ static int user_encap_alloc(struct hbl_cn_device *hdev, struct hbl_cni_user_enca xa_pdata->port = port; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->set_app_params) { dev_dbg(hdev->dev, @@ -3686,14 +3692,14 @@ static int user_encap_alloc(struct hbl_cn_device *hdev, struct hbl_cni_user_enca } xa_pdata->id = id; - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); out->id = id; return 0; cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); kfree(xa_pdata); return rc; @@ -3773,7 +3779,7 @@ static int user_encap_set(struct hbl_cn_device *hdev, struct hbl_cni_user_encap_ return -EINVAL; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_pdata = xa_load(&cn_port->encap_ids, id); if (!xa_pdata) { @@ -3829,7 +3835,7 @@ static int user_encap_set(struct hbl_cn_device *hdev, struct hbl_cni_user_encap_ if (rc) goto free_header; - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return 0; @@ -3837,7 +3843,7 @@ static int user_encap_set(struct hbl_cn_device *hdev, struct 
hbl_cni_user_encap_ if (in->encap_type != HBL_CNI_ENCAP_NONE) kfree(encap_header); cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -3862,7 +3868,7 @@ static int user_encap_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_enca if (rc) return rc; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_pdata = xa_load(&cn_port->encap_ids, id); if (!xa_pdata) { @@ -3882,7 +3888,7 @@ static int user_encap_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_enca kfree(xa_pdata); out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -3918,7 +3924,7 @@ static int user_ccq_set(struct hbl_cn_device *hdev, struct hbl_cni_user_ccq_set_ cn_port = &hdev->cn_ports[port]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->set_app_params) { dev_dbg(hdev->dev, @@ -3979,7 +3985,7 @@ static int user_ccq_set(struct hbl_cn_device *hdev, struct hbl_cni_user_ccq_set_ cn_port->ccq_enable = true; - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return 0; @@ -3988,7 +3994,7 @@ static int user_ccq_set(struct hbl_cn_device *hdev, struct hbl_cni_user_ccq_set_ free_ccq: hbl_cn_mem_destroy(hdev, ccq_mmap_handle); cfg_unlock: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -4036,7 +4042,6 @@ static int __user_ccq_unset(struct hbl_cn_device *hdev, struct hbl_cn_ctx *ctx, static int user_ccq_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_ccq_unset_in *in, struct hbl_cn_ctx *ctx) { - struct hbl_cn_asic_port_funcs *port_funcs = hdev->asic_funcs->port_funcs; struct hbl_cn_port *cn_port; u32 port; int rc; @@ -4049,7 +4054,7 @@ static int user_ccq_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_ccq_un cn_port = &hdev->cn_ports[port]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); if (!cn_port->ccq_enable) { dev_dbg(hdev->dev, "Failed unsetting CCQ handler - it is already unset, port %u\n", @@ -4060,7 +4065,7 @@ static int user_ccq_unset(struct hbl_cn_device *hdev, struct hbl_cni_user_ccq_un rc = __user_ccq_unset(hdev, ctx, in->port); out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return rc; } @@ -4268,7 +4273,6 @@ static int hbl_cn_ib_query_mem_handle(struct hbl_aux_dev *ib_aux_dev, u64 mem_ha static void qps_destroy(struct hbl_cn_device *hdev) { - struct hbl_cn_asic_port_funcs *port_funcs = hdev->asic_funcs->port_funcs; struct hbl_cn_port *cn_port; unsigned long qp_id = 0; struct hbl_cn_qp *qp; @@ -4282,7 +4286,7 @@ static void qps_destroy(struct hbl_cn_device *hdev) cn_port = &hdev->cn_ports[i]; /* protect against destroy_qp occurring in parallel */ - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->qp_ids, qp_id, qp) { if (IS_ERR_OR_NULL(qp)) @@ -4291,7 +4295,7 @@ static void qps_destroy(struct hbl_cn_device *hdev) hbl_cn_qp_do_release(qp); } - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); } /* wait for the workers to complete */ @@ -4304,13 +4308,13 @@ static void qps_destroy(struct hbl_cn_device *hdev) cn_port = &hdev->cn_ports[i]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->qp_ids, qp_id, qp) dev_err_ratelimited(hdev->dev, "Port %d QP %ld is still alive\n", cn_port->port, qp_id); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); } } @@ -4382,14 +4386,12 @@ static void ccqs_destroy(struct hbl_cn_ctx *ctx) static void user_db_fifos_destroy(struct hbl_cn_ctx *ctx) { struct hbl_cn_db_fifo_xarray_pdata *xa_pdata; - 
struct hbl_cn_asic_port_funcs *port_funcs; struct hbl_cn_port *cn_port; struct hbl_cn_device *hdev; unsigned long id; int i; hdev = ctx->hdev; - port_funcs = hdev->asic_funcs->port_funcs; for (i = 0; i < hdev->cn_props.max_num_of_ports; i++) { if (!(hdev->ports_mask & BIT(i))) @@ -4397,19 +4399,18 @@ static void user_db_fifos_destroy(struct hbl_cn_ctx *ctx) cn_port = &hdev->cn_ports[i]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->db_fifo_ids, id, xa_pdata) if (xa_pdata->asid == ctx->asid) __user_db_fifo_unset(cn_port, xa_pdata); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); } } static void encap_ids_destroy(struct hbl_cn_device *hdev) { - struct hbl_cn_asic_port_funcs *port_funcs = hdev->asic_funcs->port_funcs; struct hbl_cn_asic_funcs *asic_funcs = hdev->asic_funcs; struct hbl_cn_encap_xarray_pdata *xa_pdata; struct hbl_cn_port *cn_port; @@ -4422,7 +4423,7 @@ static void encap_ids_destroy(struct hbl_cn_device *hdev) cn_port = &hdev->cn_ports[i]; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->encap_ids, encap_id, xa_pdata) { asic_funcs->port_funcs->encap_unset(cn_port, encap_id, xa_pdata); @@ -4434,7 +4435,7 @@ static void encap_ids_destroy(struct hbl_cn_device *hdev) xa_erase(&cn_port->encap_ids, encap_id); } - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); } } diff --git a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h index 3d1532bfd9b56..b2ba3ad642d76 100644 --- a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h +++ b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h @@ -1630,6 +1630,9 @@ void hbl_cn_eq_handler(struct hbl_cn_port *cn_port); int hbl_cn_alloc_ring(struct hbl_cn_device *hdev, struct hbl_cn_ring *ring, int elem_size, int count); void hbl_cn_free_ring(struct hbl_cn_device *hdev, struct hbl_cn_ring *ring); +void hbl_cn_cfg_lock(struct hbl_cn_port *cn_port); +void hbl_cn_cfg_unlock(struct hbl_cn_port *cn_port); +bool hbl_cn_cfg_is_locked(struct hbl_cn_port *cn_port); struct hbl_cn_user_cq *hbl_cn_user_cq_get(struct hbl_cn_port *cn_port, u8 cq_id); int hbl_cn_user_cq_put(struct hbl_cn_user_cq *user_cq); diff --git a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn_qp.c b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn_qp.c index 26ebdf4481934..e89e225c1a163 100644 --- a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn_qp.c +++ b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn_qp.c @@ -112,11 +112,11 @@ static inline int wait_for_qpc_idle(struct hbl_cn_port *cn_port, struct hbl_cn_q /* Release lock while we wait before retry. * Note, we can assert that we are already locked. 
*/ - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); msleep(20); - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); } rc = port_funcs->qpc_query(cn_port, qp->qp_id, is_req, &qpc_attr); @@ -410,7 +410,7 @@ int hbl_cn_qp_modify(struct hbl_cn_port *cn_port, struct hbl_cn_qp *qp, /* only SQD->SQD transition can be executed without holding the configuration lock */ if (prev_state != CN_QP_STATE_SQD || new_state != CN_QP_STATE_SQD) { - if (!port_funcs->cfg_is_locked(cn_port)) { + if (!hbl_cn_cfg_is_locked(cn_port)) { dev_err(hdev->dev, "Configuration lock must be held while moving Port %u QP %u from state %s to %s\n", qp->port, qp->qp_id, cn_qp_state_2name(prev_state), diff --git a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c index 91407a0f8a202..b6fc83184a756 100644 --- a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c +++ b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c @@ -1822,7 +1822,6 @@ static int gaudi2_cn_qpc_write_masked(struct hbl_cn_port *cn_port, const void *q bool gaudi2_handle_qp_error_retry(struct hbl_cn_port *cn_port, u32 qpn) { struct hbl_cn_device *hdev = cn_port->hdev; - struct hbl_cn_asic_port_funcs *port_funcs; struct gaudi2_qpc_requester req_qpc = {}; struct qpc_mask mask = {}; int port = cn_port->port; @@ -1832,12 +1831,11 @@ bool gaudi2_handle_qp_error_retry(struct hbl_cn_port *cn_port, u32 qpn) u8 timeout_max; u64 wq_delay; - port_funcs = hdev->asic_funcs->port_funcs; - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); qp = xa_load(&cn_port->qp_ids, qpn); if (!qp) { - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); dev_err(hdev->dev, "adaptive retry, port %d, QP: %d is null\n", port, qpn); @@ -1874,7 +1872,7 @@ bool gaudi2_handle_qp_error_retry(struct hbl_cn_port *cn_port, u32 qpn) } while (retry); if (!retry) { - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); dev_err(hdev->dev, "failed to clear QPC error port %d, %d\n", port, qpn); return false; @@ -1889,14 +1887,14 @@ bool gaudi2_handle_qp_error_retry(struct hbl_cn_port *cn_port, u32 qpn) queue_delayed_work(cn_port->qp_wq, &qp->adaptive_tmr_reset, msecs_to_jiffies(wq_delay / 1000)); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return true; } qp->timeout_curr = qp->timeout_granularity - (NIC_ADAPTIVE_TIMEOUT_RANGE >> 1); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); return false; } @@ -5312,7 +5310,6 @@ static void gaudi2_cn_set_port_status(struct hbl_cn_port *cn_port, bool up) static void gaudi2_cn_adaptive_tmr_reset(struct hbl_cn_qp *qp) { struct hbl_cn_port *cn_port = qp->cn_port; - struct hbl_cn_asic_port_funcs *port_funcs; struct gaudi2_qpc_requester req_qpc; struct hbl_cn_device *hdev; u64 retry_count; @@ -5322,9 +5319,7 @@ static void gaudi2_cn_adaptive_tmr_reset(struct hbl_cn_qp *qp) hdev = cn_port->hdev; user_gran = qp->timeout_granularity - NIC_ADAPTIVE_TIMEOUT_RANGE / 2; - port_funcs = hdev->asic_funcs->port_funcs; - - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); rc = gaudi2_cn_qpc_read(cn_port, &req_qpc, qp->qp_id, true); if (rc) @@ -5344,7 +5339,7 @@ static void gaudi2_cn_adaptive_tmr_reset(struct hbl_cn_qp *qp) } out: - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); } static int gaudi2_cn_send_cpucp_packet(struct hbl_cn_port *cn_port, enum cpucp_packet_id packet_id, diff --git a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn_debugfs.c b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn_debugfs.c 
index 3acd8b8b0f4a3..8c50cbdc62f4f 100644 --- a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn_debugfs.c +++ b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn_debugfs.c @@ -177,9 +177,9 @@ int gaudi2_cn_debugfs_qp_read(struct hbl_cn_device *hdev, struct hbl_cn_qp_info return -EPERM; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); rc = gaudi2_cn_qpc_read(cn_port, qpc, qpn, req); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); if (rc) return rc; @@ -283,9 +283,9 @@ int gaudi2_cn_debugfs_wqe_read(struct hbl_cn_device *hdev, char *buf, size_t bsi return -EPERM; } - port_funcs->cfg_lock(cn_port); + hbl_cn_cfg_lock(cn_port); rc = gaudi2_cn_wqe_read(cn_port, wqe, qpn, wqe_idx, tx); - port_funcs->cfg_unlock(cn_port); + hbl_cn_cfg_unlock(cn_port); if (rc) goto exit; From 20d130baa47605fa8adddc5facc061b6edbf8af7 Mon Sep 17 00:00:00 2001 From: Tal Cohen Date: Mon, 27 Jan 2025 11:06:47 +0200 Subject: [PATCH 2/2] net: hbl_cn: use common port cfg lock instead of ASIC-specific Move the port's cfg lock from the ASIC-specific code into the common port code. This builds on the previous change that introduced the common cfg lock functions. Signed-off-by: Tal Cohen --- .../net/ethernet/intel/hbl_cn/common/hbl_cn.c | 22 ++++--------- .../net/ethernet/intel/hbl_cn/common/hbl_cn.h | 3 ++ .../ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c | 32 ++----------------- .../ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.h | 3 -- 4 files changed, 12 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c index 87c23b5c4f175..4a713bb2a8ea1 100644 --- a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c +++ b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.c @@ -1667,30 +1667,20 @@ void hbl_cn_hard_reset_prepare(struct hbl_aux_dev *cn_aux_dev, bool fw_reset, bo } void hbl_cn_cfg_lock(struct hbl_cn_port *cn_port) + __acquires(&cn_port->cfg_lock) { - struct hbl_cn_device *hdev = cn_port->hdev; - struct hbl_cn_asic_port_funcs *port_funcs; - - port_funcs = hdev->asic_funcs->port_funcs; - port_funcs->cfg_lock(cn_port); + mutex_lock(&cn_port->cfg_lock); } void hbl_cn_cfg_unlock(struct hbl_cn_port *cn_port) + __releases(&cn_port->cfg_lock) { - struct hbl_cn_device *hdev = cn_port->hdev; - struct hbl_cn_asic_port_funcs *port_funcs; - - port_funcs = hdev->asic_funcs->port_funcs; - port_funcs->cfg_unlock(cn_port); + mutex_unlock(&cn_port->cfg_lock); } bool hbl_cn_cfg_is_locked(struct hbl_cn_port *cn_port) { - struct hbl_cn_device *hdev = cn_port->hdev; - struct hbl_cn_asic_port_funcs *port_funcs; - - port_funcs = hdev->asic_funcs->port_funcs; - return port_funcs->cfg_is_locked(cn_port); + return mutex_is_locked(&cn_port->cfg_lock); } int hbl_cn_send_port_cpucp_status(struct hbl_aux_dev *aux_dev, u32 port, u8 cmd, u8 period) @@ -4740,6 +4730,7 @@ static int cn_port_sw_init(struct hbl_cn_port *cn_port) mutex_init(&cn_port->control_lock); mutex_init(&cn_port->cnt_lock); + mutex_init(&cn_port->cfg_lock); xa_init_flags(&cn_port->qp_ids, XA_FLAGS_ALLOC); xa_init_flags(&cn_port->db_fifo_ids, XA_FLAGS_ALLOC); @@ -4768,6 +4759,7 @@ static int cn_port_sw_init(struct hbl_cn_port *cn_port) xa_destroy(&cn_port->db_fifo_ids); xa_destroy(&cn_port->qp_ids); + mutex_destroy(&cn_port->cfg_lock); mutex_destroy(&cn_port->cnt_lock); mutex_destroy(&cn_port->control_lock); diff --git a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h index b2ba3ad642d76..b20b823fde7ba 100644 --- 
a/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h +++ b/drivers/net/ethernet/intel/hbl_cn/common/hbl_cn.h @@ -1107,6 +1107,7 @@ struct hbl_cn_macro { * @control_lock: protects from a race between port open/close and other stuff that might run in * parallel (such as event handling). * @cnt_lock: protects the counters from concurrent reading. Needed for SPMU and XPCS91 counters. + * @cfg_lock: serializes the port configuration. * @qp_ids: xarray to hold all QP IDs. * @db_fifo_ids: Allocated doorbell fifo IDs. * @cq_ids: xarray to hold all CQ IDs. @@ -1171,6 +1172,8 @@ struct hbl_cn_port { struct mutex control_lock; /* protects the counters from concurrent reading */ struct mutex cnt_lock; + /* serializes the port configuration */ + struct mutex cfg_lock; struct xarray qp_ids; struct xarray db_fifo_ids; struct xarray cq_ids; diff --git a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c index b6fc83184a756..514b1ab010b29 100644 --- a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c +++ b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.c @@ -755,7 +755,6 @@ static void gaudi2_cn_port_sw_fini(struct hbl_cn_port *cn_port) { struct gaudi2_cn_port *gaudi2_port = cn_port->cn_specific; mutex_destroy(&gaudi2_port->qp_destroy_lock); - mutex_destroy(&gaudi2_port->cfg_lock); hbl_cn_eq_dispatcher_fini(cn_port); gaudi2_cn_free_rings_resources(gaudi2_port); @@ -795,7 +794,6 @@ static int gaudi2_cn_port_sw_init(struct hbl_cn_port *cn_port) hbl_cn_eq_dispatcher_init(gaudi2_port->cn_port); - mutex_init(&gaudi2_port->cfg_lock); mutex_init(&gaudi2_port->qp_destroy_lock); /* Userspace might not be notified immediately of link event from HW. @@ -5091,11 +5089,11 @@ static void gaudi2_qp_sanity_work(struct work_struct *work) gaudi2_port->qp_timeout_cnt = timeout_cnt; - mutex_lock(&gaudi2_port->cfg_lock); + hbl_cn_cfg_lock(cn_port); xa_for_each(&cn_port->qp_ids, qp_id, qp) if (qp && qp->is_req) __qpc_sanity_check(gaudi2_port, qp_id); - mutex_unlock(&gaudi2_port->cfg_lock); + hbl_cn_cfg_unlock(cn_port); done: queue_delayed_work(gaudi2_port->qp_sanity_wq, &gaudi2_port->qp_sanity_work, @@ -5270,29 +5268,6 @@ static void gaudi2_cn_get_status(struct hbl_cn_port *cn_port, struct hbl_cn_cpuc status->high_ber_cnt = high_ber_cnt; } -static void gaudi2_cn_cfg_lock(struct hbl_cn_port *cn_port) - __acquires(&gaudi2_port->cfg_lock) -{ - struct gaudi2_cn_port *gaudi2_port = cn_port->cn_specific; - - mutex_lock(&gaudi2_port->cfg_lock); -} - -static void gaudi2_cn_cfg_unlock(struct hbl_cn_port *cn_port) - __releases(&gaudi2_port->cfg_lock) -{ - struct gaudi2_cn_port *gaudi2_port = cn_port->cn_specific; - - mutex_unlock(&gaudi2_port->cfg_lock); -} - -static bool gaudi2_cn_cfg_is_locked(struct hbl_cn_port *cn_port) -{ - struct gaudi2_cn_port *gaudi2_port = cn_port->cn_specific; - - return mutex_is_locked(&gaudi2_port->cfg_lock); -} - static u32 gaudi2_cn_get_max_msg_sz(struct hbl_cn_device *hdev) { return SZ_1G; @@ -5594,9 +5569,6 @@ static struct hbl_cn_asic_port_funcs gaudi2_cn_port_funcs = { .collect_fec_stats = gaudi2_cn_debugfs_collect_fec_stats, .disable_wqe_index_checker = gaudi2_cn_disable_wqe_index_checker, .get_status = gaudi2_cn_get_status, - .cfg_lock = gaudi2_cn_cfg_lock, - .cfg_unlock = gaudi2_cn_cfg_unlock, - .cfg_is_locked = gaudi2_cn_cfg_is_locked, .qp_pre_destroy = gaudi2_cn_qp_pre_destroy, .qp_post_destroy = gaudi2_cn_qp_post_destroy, .set_port_status = gaudi2_cn_set_port_status, diff --git a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.h 
b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.h index 58a0d4e86d47b..b3017c2cf5b80 100644 --- a/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.h +++ b/drivers/net/ethernet/intel/hbl_cn/gaudi2/gaudi2_cn.h @@ -305,7 +305,6 @@ struct gaudi2_cn_port; * @eq_work: EQ work for processing events (e.g Tx completion). * @qp_sanity_work: QPC sanity check worker. * @qp_sanity_wq: QPC sanity worker thread. - * @cfg_lock: Serializes the port configuration. * @qp_destroy_lock: protects the MAC loopback switching for QP destroy flow. * @pcs_link_stady_state_ts: the timestamp to move to the pcs link steady state. * @pcs_link_state: the current pcs link state. @@ -332,8 +331,6 @@ struct gaudi2_cn_port { struct delayed_work eq_work; struct delayed_work qp_sanity_work; struct workqueue_struct *qp_sanity_wq; - /* Serializes the port configuration */ - struct mutex cfg_lock; /* protects the MAC loopback switching for QP destroy flow */ struct mutex qp_destroy_lock; ktime_t pcs_link_stady_state_ts;
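
The shape of the end state, as a minimal userspace sketch that compiles and runs (gcc file.c -pthread). This is illustrative only: pthreads stand in for the kernel mutex API, and struct cn_port with the cn_cfg_* helpers are simplified stand-ins for hbl_cn_port and hbl_cn_cfg_lock()/hbl_cn_cfg_unlock(), not the driver's real interfaces:

    #include <pthread.h>
    #include <stdio.h>

    /* after patch 2 the cfg lock lives in the common port structure
     * (cf. hbl_cn_port), not in an ASIC-specific one (cf. gaudi2_cn_port
     * before this series)
     */
    struct cn_port {
            pthread_mutex_t cfg_lock;
            int port;
    };

    /* common helpers, mirroring hbl_cn_cfg_lock()/hbl_cn_cfg_unlock():
     * plain mutex operations, no per-ASIC dispatch through an ops table
     */
    static void cn_cfg_lock(struct cn_port *p)
    {
            pthread_mutex_lock(&p->cfg_lock);
    }

    static void cn_cfg_unlock(struct cn_port *p)
    {
            pthread_mutex_unlock(&p->cfg_lock);
    }

    /* a configuration path: takes the common lock around its critical
     * section, as the driver does around QP/CQ/db-fifo setup
     */
    static void cfg_update(struct cn_port *p)
    {
            cn_cfg_lock(p);
            printf("port %d: configuration change serialized\n", p->port);
            cn_cfg_unlock(p);
    }

    int main(void)
    {
            struct cn_port p = { .cfg_lock = PTHREAD_MUTEX_INITIALIZER, .port = 0 };

            cfg_update(&p);
            return 0;
    }

With the mutex in the common hbl_cn_port, the per-ASIC cfg_lock/cfg_unlock/cfg_is_locked callbacks become unnecessary and every configuration path is serialized, and can be asserted, in one place. Note that hbl_cn_cfg_is_locked() maps to the kernel's mutex_is_locked(), which has no exact pthreads equivalent, so the sketch omits it.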