Skip to content

Commit 08d5397

Browse files
Leon Romanovsky authored and jgunthorpe committed
RDMA/mlx5: Copy response to the user in one place
Update all the places in create QP flows to copy response to the user in one place.

Link: https://lore.kernel.org/r/20200427154636.381474-34-leon@kernel.org
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
1 parent 6f2cf76 commit 08d5397

1 file changed

Lines changed: 52 additions & 61 deletions

File tree

  • drivers/infiniband/hw/mlx5

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 52 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -1015,17 +1015,8 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
10151015
goto err_free;
10161016
}
10171017

1018-
err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
1019-
if (err) {
1020-
mlx5_ib_dbg(dev, "copy failed\n");
1021-
goto err_unmap;
1022-
}
1023-
10241018
return 0;
10251019

1026-
err_unmap:
1027-
mlx5_ib_db_unmap_user(context, &qp->db);
1028-
10291020
err_free:
10301021
kvfree(*in);
10311022

@@ -1551,14 +1542,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
15511542

15521543
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
15531544
rq->base.mqp.qpn;
1554-
err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
1555-
if (err)
1556-
goto err_destroy_tir;
1557-
15581545
return 0;
15591546

1560-
err_destroy_tir:
1561-
destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
15621547
err_destroy_rq:
15631548
destroy_raw_packet_qp_rq(dev, rq);
15641549
err_destroy_sq:
@@ -1618,6 +1603,7 @@ struct mlx5_create_qp_params {
16181603
u8 is_rss_raw : 1;
16191604
struct ib_qp_init_attr *attr;
16201605
u32 uidx;
1606+
struct mlx5_ib_create_qp_resp resp;
16211607
};
16221608

16231609
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
@@ -1629,7 +1615,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
16291615
struct ib_udata *udata = params->udata;
16301616
struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
16311617
udata, struct mlx5_ib_ucontext, ibucontext);
1632-
struct mlx5_ib_create_qp_resp resp = {};
16331618
int inlen;
16341619
int outlen;
16351620
int err;
@@ -1662,12 +1647,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
16621647
if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
16631648
lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
16641649

1665-
err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
1666-
if (err) {
1667-
mlx5_ib_dbg(dev, "copy failed\n");
1668-
return -EINVAL;
1669-
}
1670-
16711650
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
16721651
outlen = MLX5_ST_SZ_BYTES(create_tir_out);
16731652
in = kvzalloc(inlen + outlen, GFP_KERNEL);
@@ -1803,34 +1782,30 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
18031782
goto err;
18041783

18051784
if (mucontext->devx_uid) {
1806-
resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1807-
resp.tirn = qp->rss_qp.tirn;
1785+
params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1786+
params->resp.tirn = qp->rss_qp.tirn;
18081787
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
1809-
resp.tir_icm_addr =
1788+
params->resp.tir_icm_addr =
18101789
MLX5_GET(create_tir_out, out, icm_address_31_0);
1811-
resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
1812-
icm_address_39_32)
1813-
<< 32;
1814-
resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
1815-
icm_address_63_40)
1816-
<< 40;
1817-
resp.comp_mask |=
1790+
params->resp.tir_icm_addr |=
1791+
(u64)MLX5_GET(create_tir_out, out,
1792+
icm_address_39_32)
1793+
<< 32;
1794+
params->resp.tir_icm_addr |=
1795+
(u64)MLX5_GET(create_tir_out, out,
1796+
icm_address_63_40)
1797+
<< 40;
1798+
params->resp.comp_mask |=
18181799
MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
18191800
}
18201801
}
18211802

1822-
err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
1823-
if (err)
1824-
goto err_copy;
1825-
18261803
kvfree(in);
18271804
/* qpn is reserved for that QP */
18281805
qp->trans_qp.base.mqp.qpn = 0;
18291806
qp->is_rss = true;
18301807
return 0;
18311808

1832-
err_copy:
1833-
mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
18341809
err:
18351810
kvfree(in);
18361811
return err;
@@ -1995,7 +1970,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
19951970
struct mlx5_ib_resources *devr = &dev->devr;
19961971
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
19971972
struct mlx5_core_dev *mdev = dev->mdev;
1998-
struct mlx5_ib_create_qp_resp resp = {};
19991973
struct mlx5_ib_cq *send_cq;
20001974
struct mlx5_ib_cq *recv_cq;
20011975
unsigned long flags;
@@ -2038,8 +2012,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
20382012
if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
20392013
return -EINVAL;
20402014

2041-
err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &resp, &inlen,
2042-
base, ucmd);
2015+
err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
2016+
&inlen, base, ucmd);
20432017
if (err)
20442018
return err;
20452019

@@ -2139,7 +2113,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
21392113
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
21402114
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
21412115
err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2142-
&resp);
2116+
&params->resp);
21432117
} else
21442118
err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
21452119

@@ -2865,6 +2839,25 @@ static int get_qp_uidx(struct mlx5_ib_qp *qp,
28652839
return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
28662840
}
28672841

2842+
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
2843+
{
2844+
struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
2845+
2846+
if (mqp->state == IB_QPS_RTR) {
2847+
int err;
2848+
2849+
err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
2850+
if (err) {
2851+
mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
2852+
return err;
2853+
}
2854+
}
2855+
2856+
kfree(mqp->dct.in);
2857+
kfree(mqp);
2858+
return 0;
2859+
}
2860+
28682861
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
28692862
struct ib_udata *udata)
28702863
{
@@ -2955,6 +2948,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
29552948
}
29562949

29572950
kfree(params.ucmd);
2951+
params.ucmd = NULL;
29582952

29592953
if (is_qp0(attr->qp_type))
29602954
qp->ibqp.qp_num = 0;
@@ -2965,34 +2959,31 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
29652959

29662960
qp->trans_qp.xrcdn = xrcdn;
29672961

2962+
if (udata)
2963+
/*
2964+
* It is safe to copy response for all user create QP flows,
2965+
* including MLX5_IB_QPT_DCT, which doesn't need it.
2966+
* In that case, resp will be filled with zeros.
2967+
*/
2968+
err = ib_copy_to_udata(udata, &params.resp, params.outlen);
2969+
if (err)
2970+
goto destroy_qp;
2971+
29682972
return &qp->ibqp;
29692973

2974+
destroy_qp:
2975+
if (qp->type == MLX5_IB_QPT_DCT)
2976+
mlx5_ib_destroy_dct(qp);
2977+
else
2978+
destroy_qp_common(dev, qp, udata);
2979+
qp = NULL;
29702980
free_qp:
29712981
kfree(qp);
29722982
free_ucmd:
29732983
kfree(params.ucmd);
29742984
return ERR_PTR(err);
29752985
}
29762986

2977-
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
2978-
{
2979-
struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
2980-
2981-
if (mqp->state == IB_QPS_RTR) {
2982-
int err;
2983-
2984-
err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
2985-
if (err) {
2986-
mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
2987-
return err;
2988-
}
2989-
}
2990-
2991-
kfree(mqp->dct.in);
2992-
kfree(mqp);
2993-
return 0;
2994-
}
2995-
29962987
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
29972988
{
29982989
struct mlx5_ib_dev *dev = to_mdev(qp->device);

0 commit comments

Comments (0)