@@ -101,6 +101,82 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
 	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
 }
 
+/**
+ * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ *
+ * @qp: QP to copy from.
+ * @send: copy from the send queue when non-zero, use the receive queue
+ *	  otherwise.
+ * @wqe_index: index to start copying from. For send work queues, the
+ *	       wqe_index is in units of MLX5_SEND_WQE_BB.
+ *	       For receive work queues, it is the index of the work
+ *	       queue element in the queue.
+ * @buffer: destination buffer.
+ * @length: maximum number of bytes to copy.
+ *
+ * Copies at least a single WQE, but may copy more data.
+ *
+ * Return: the number of bytes copied, or a negative error code.
+ */
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+			  void *buffer, u32 length)
+{
+	struct ib_device *ibdev = qp->ibqp.device;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
+	size_t offset;
+	size_t wq_end;
+	struct ib_umem *umem = qp->umem;
+	u32 first_copy_length;
+	int wqe_length;
+	int ret;
+
+	if (wq->wqe_cnt == 0) {
+		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
+			    qp->ibqp.qp_type);
+		return -EINVAL;
+	}
+
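+	/* wqe_index wraps modulo the queue depth; convert it to a byte offset. */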
+	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
+	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+
+	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+		return -EINVAL;
+
+	if (offset > umem->length ||
+	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+		return -EINVAL;
+
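+	/* Copy the first chunk; it may stop short at the end of the queue buffer. */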
+	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
+	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+	if (ret)
+		return ret;
+
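+	/* A send WQE's length is the DS field of its ctrl segment, in 16-byte units. */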
+	if (send) {
+		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
+		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+
+		wqe_length = ds * MLX5_WQE_DS_UNITS;
+	} else {
+		wqe_length = 1 << wq->wqe_shift;
+	}
+
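+	/* Done if the entire WQE fit in the first chunk. */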
+	if (wqe_length <= first_copy_length)
+		return first_copy_length;
+
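+	/* The WQE wrapped around the queue; copy the rest from the queue's start. */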
+	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
+				wqe_length - first_copy_length);
+	if (ret)
+		return ret;
+
+	return wqe_length;
+}
+
 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 {
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
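
For illustration, here is a minimal sketch of how a caller might use the new helper to pull a send WQE into kernel memory and inspect its control segment. The helper name inspect_send_wqe and the buffer sizing are assumptions for the example, not part of this patch. Note that on a wrapping WQE the function copies the complete WQE even beyond @length, so the buffer must be sized for the largest WQE the QP can post, not just for the bytes the caller wants.

/* Hypothetical caller sketch, not part of this patch. */
static int inspect_send_wqe(struct mlx5_ib_qp *qp, int wqe_index)
{
	/* Sized for the largest possible send WQE (assumption for the sketch). */
	char buf[MLX5_SEND_WQE_BB * MLX5_SEND_WQE_MAX_WQEBBS];
	struct mlx5_wqe_ctrl_seg *ctrl;
	int bytes;

	bytes = mlx5_ib_read_user_wqe(qp, 1 /* send queue */, wqe_index,
				      buf, sizeof(buf));
	if (bytes < 0)
		return bytes;	/* -EINVAL or a copy error */

	/* At least one full WQE is now in kernel memory. */
	ctrl = (struct mlx5_wqe_ctrl_seg *)buf;
	/* ... parse ctrl->opmod_idx_opcode, the data segments, etc. ... */

	return 0;
}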