|
35 | 35 |
|
36 | 36 | #include <linux/mlx4/cmd.h> |
37 | 37 | #include <linux/gfp.h> |
| 38 | +#include <rdma/ib_pma.h> |
38 | 39 |
|
39 | 40 | #include "mlx4_ib.h" |
40 | 41 |
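The new include pulls in the Performance Management Agent (PMA) datatypes that the Ethernet path added below relies on. For reference, an abridged excerpt of the two structures involved (layout per <rdma/ib_pma.h>; fields not touched by this patch are elided):

	/* Abridged from <rdma/ib_pma.h>; only the fields used below are shown. */
	struct ib_pma_mad {
		struct ib_mad_hdr mad_hdr;
		u8 reserved[40];	/* why the reply is built at out_mad->data + 40 */
		u8 data[192];		/* PMA attribute payload */
	} __packed;

	struct ib_pma_portcounters {
		/* ... error and discard counters elided ... */
		__be32 port_xmit_data;		/* PortXmitData, in 32-bit words */
		__be32 port_rcv_data;		/* PortRcvData, in 32-bit words */
		__be32 port_xmit_packets;	/* PortXmitPkts */
		__be32 port_rcv_packets;	/* PortRcvPkts */
	} __packed;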
|
@@ -232,7 +233,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad |
232 | 233 | } |
233 | 234 | } |
234 | 235 |
|
235 | | -int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
| 236 | +static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
236 | 237 | struct ib_wc *in_wc, struct ib_grh *in_grh, |
237 | 238 | struct ib_mad *in_mad, struct ib_mad *out_mad) |
238 | 239 | { |
@@ -302,6 +303,71 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
302 | 303 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; |
303 | 304 | } |
304 | 305 |
|
| 306 | +static void edit_counter(struct mlx4_counter *cnt, |
| 307 | + struct ib_pma_portcounters *pma_cnt) |
| 308 | +{ |
| 309 | + pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); |
| 310 | + pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); |
| 311 | + pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); |
| 312 | + pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); |
| 313 | +} |
| 314 | + |
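The shifts in edit_counter() are a units conversion: the IBA PMA spec defines PortXmitData and PortRcvData in units of 32-bit words, while the mlx4 hardware counters count bytes, so the 64-bit byte counters are divided by four (>> 2) and truncated into the 32-bit PMA fields. A minimal sketch of the inverse, consumer-side conversion (hypothetical user-space helper, not part of this patch):

	#include <stdint.h>
	#include <arpa/inet.h>	/* ntohl() */

	/* Hypothetical helper: recover a (32-bit-truncated) byte count from a
	 * big-endian PortXmitData/PortRcvData dword counter. */
	static inline uint64_t pma_dwords_to_bytes(uint32_t dwords_be)
	{
		return (uint64_t)ntohl(dwords_be) * 4;
	}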
| 315 | +static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
| 316 | + struct ib_wc *in_wc, struct ib_grh *in_grh, |
| 317 | + struct ib_mad *in_mad, struct ib_mad *out_mad) |
| 318 | +{ |
| 319 | + struct mlx4_cmd_mailbox *mailbox; |
| 320 | + struct mlx4_ib_dev *dev = to_mdev(ibdev); |
| 321 | + int err; |
| 322 | + u32 inmod = dev->counters[port_num - 1] & 0xffff; |
| 323 | + u8 mode; |
| 324 | + |
| 325 | + if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) |
| 326 | + return -EINVAL; |
| 327 | + |
| 328 | + mailbox = mlx4_alloc_cmd_mailbox(dev->dev); |
| 329 | + if (IS_ERR(mailbox)) |
| 330 | + return IB_MAD_RESULT_FAILURE; |
| 331 | + |
| 332 | + err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0, |
| 333 | + MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C); |
| 334 | + if (err) |
| 335 | + err = IB_MAD_RESULT_FAILURE; |
| 336 | + else { |
| 337 | + memset(out_mad->data, 0, sizeof out_mad->data); |
| 338 | + mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode; |
| 339 | + switch (mode & 0xf) { |
| 340 | + case 0: |
| 341 | + edit_counter(mailbox->buf, |
| 342 | + (void *)(out_mad->data + 40)); |
| 343 | + err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; |
| 344 | + break; |
| 345 | + default: |
| 346 | + err = IB_MAD_RESULT_FAILURE; |
| 347 | + } |
| 348 | + } |
| 349 | + |
| 350 | + mlx4_free_cmd_mailbox(dev->dev, mailbox); |
| 351 | + |
| 352 | + return err; |
| 353 | +} |
| 354 | + |
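Flow of iboe_process_mad(): only IB_MGMT_CLASS_PERF_MGMT queries are served (anything else gets a plain -EINVAL rather than a result flag); a command mailbox is allocated, MLX4_CMD_QUERY_IF_STAT reads the counter set selected by inmod (the counter index the driver allocated for this port), and only the basic counter layout (low nibble of counter_mode equal to 0) is translated into the reply at out_mad->data + 40, i.e. past the 40 reserved bytes of the PMA MAD shown above. The return values follow the process_mad contract; a small self-contained illustration of how a caller interprets them (flag values copied from <rdma/ib_verbs.h>, the rest is illustrative):

	#include <stdio.h>

	/* Result flags as defined in <rdma/ib_verbs.h>. */
	#define IB_MAD_RESULT_FAILURE 0		/* !SUCCESS is the important flag */
	#define IB_MAD_RESULT_SUCCESS (1 << 0)	/* MAD was successfully processed */
	#define IB_MAD_RESULT_REPLY   (1 << 1)	/* reply packet needs to be sent  */

	int main(void)
	{
		int ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;

		if (!(ret & IB_MAD_RESULT_SUCCESS))
			printf("MAD processing failed, no reply generated\n");
		else if (ret & IB_MAD_RESULT_REPLY)
			printf("out_mad holds a reply to send back\n");
		return 0;
	}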
| 355 | +int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
| 356 | + struct ib_wc *in_wc, struct ib_grh *in_grh, |
| 357 | + struct ib_mad *in_mad, struct ib_mad *out_mad) |
| 358 | +{ |
| 359 | + switch (rdma_port_get_link_layer(ibdev, port_num)) { |
| 360 | + case IB_LINK_LAYER_INFINIBAND: |
| 361 | + return ib_process_mad(ibdev, mad_flags, port_num, in_wc, |
| 362 | + in_grh, in_mad, out_mad); |
| 363 | + case IB_LINK_LAYER_ETHERNET: |
| 364 | + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, |
| 365 | + in_grh, in_mad, out_mad); |
| 366 | + default: |
| 367 | + return -EINVAL; |
| 368 | + } |
| 369 | +} |
| 370 | + |
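The exported entry point keeps its old name and signature, so no callers change: the driver's init path still registers it on the verbs device, and the wrapper now routes by port link layer, giving full MAD processing on InfiniBand ports and PMA counter queries only on Ethernet (IBoE) ports. For reference, the wrapper is wired up roughly as follows in the driver's init path (pre-existing code, not part of this diff):

	/* In mlx4_ib_add(), main.c; unchanged by this patch. */
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;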
305 | 371 | static void send_handler(struct ib_mad_agent *agent, |
306 | 372 | struct ib_mad_send_wc *mad_send_wc) |
307 | 373 | { |
|