@@ -469,7 +469,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
 			spin_unlock_irq(&ent->lock);
 
 			err = add_keys(dev, entry, 1);
-			if (err)
+			if (err && err != -EAGAIN)
 				return ERR_PTR(err);
 
 			wait_for_completion(&ent->compl);
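Not part of the patch: a minimal userspace model of the new control flow, assuming -EAGAIN from add_keys() means the MKey creation could not be queued right now (for example, too many asynchronous registrations already pending) rather than a hard failure, so the caller may still block on the entry's completion instead of bailing out. All names below (cache_alloc_model, fake_add_keys, fake_wait_for_completion) are hypothetical stand-ins.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in: pretend MKey creation was deferred, not failed. */
static int fake_add_keys(void) { return -EAGAIN; }

/* Hypothetical stand-in for wait_for_completion(&ent->compl). */
static void fake_wait_for_completion(void) { printf("waiting for async MKey\n"); }

/* Model of the patched flow: only hard errors abort the allocation. */
static int cache_alloc_model(void)
{
        int err = fake_add_keys();

        if (err && err != -EAGAIN)
                return err;             /* real failure */

        fake_wait_for_completion();     /* a pending creation still completes */
        return 0;
}

int main(void)
{
        return cache_alloc_model() ? 1 : 0;
}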
@@ -669,8 +669,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 		queue_work(cache->wq, &ent->work);
 
-		if (i > MAX_UMR_CACHE_ENTRY)
+		if (i > MAX_UMR_CACHE_ENTRY) {
+			mlx5_odp_init_mr_cache_entry(ent);
 			continue;
+		}
 
 		if (!use_umr(dev, ent->order))
 			continue;
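A reading of what the new branch is for, not taken from this hunk alone: the cache entries above MAX_UMR_CACHE_ENTRY appear to be reserved for ODP, so they get an ODP-specific initializer and skip the normal UMR setup. The enum below is an illustrative model only; the _MODEL names and the value 20 are assumptions for the sketch, the real definitions live in mlx5_ib.h.

/* Illustrative model of the MR cache index space after this change:
 * everything up to MAX_UMR_CACHE_ENTRY stays a regular UMR-backed size
 * class, while the top entries feed implicit-ODP MKeys. Names and values
 * here are assumptions, not the driver's enum. */
enum mr_cache_entry_model {
        MR_CACHE_FIRST_ENTRY_MODEL = 0,   /* regular power-of-two size classes */
        MAX_UMR_CACHE_ENTRY_MODEL  = 20,  /* last regular UMR entry (assumed) */
        IMR_MTT_CACHE_ENTRY_MODEL,        /* child MKeys of an implicit MR */
        IMR_KSM_CACHE_ENTRY_MODEL,        /* top-level MKey of an implicit MR */
        MAX_MR_CACHE_ENTRIES_MODEL
};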
@@ -935,6 +937,10 @@ static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
 {
 	struct mlx5_ib_dev *dev = mr->dev;
 	struct ib_umem *umem = mr->umem;
+	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
+		return npages;
+	}
 
 	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
 
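For context, a sketch of the two descriptor shapes this dispatch selects between: a direct update fills per-page MTT entries, while an indirect (MLX5_IB_UPD_XLT_INDIRECT) update lets mlx5_odp_populate_klm() fill KLM entries that each point at another MKey. The layouts below are illustrative models, not the driver's struct mlx5_mtt / struct mlx5_klm definitions.

#include <stdint.h>

/* Illustrative model of an MTT entry: one page of a direct mapping. */
struct mtt_model {
        uint64_t ptag;           /* page address plus control bits */
};

/* Illustrative model of a KLM entry: a window into another MKey, which is
 * how an indirect table can reference the implicit MR's child MKeys. */
struct klm_model {
        uint32_t byte_count;     /* length covered via the referenced MKey */
        uint32_t mkey;           /* the referenced (child) MKey */
        uint64_t va;             /* starting address within that MKey */
};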
@@ -968,7 +974,9 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	struct mlx5_umr_wr wr;
 	struct ib_sge sg;
 	int err = 0;
-	int desc_size = sizeof(struct mlx5_mtt);
+	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
+			? sizeof(struct mlx5_klm)
+			: sizeof(struct mlx5_mtt);
 	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
 	const int page_mask = page_align - 1;
 	size_t pages_mapped = 0;
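A small standalone calculation of what the new desc_size choice implies for the alignment-derived values, assuming the usual sizes of 8 bytes per MTT, 16 bytes per KLM and a 64-byte MLX5_UMR_MTT_ALIGNMENT; those three constants are assumptions of the sketch, not taken from this hunk.

#include <stdio.h>

/* Assumed sizes, for illustration only. */
#define MTT_SIZE_MODEL          8
#define KLM_SIZE_MODEL          16
#define UMR_MTT_ALIGNMENT_MODEL 64

int main(void)
{
        for (int indirect = 0; indirect <= 1; indirect++) {
                /* Mirrors the patched expression in mlx5_ib_update_xlt(). */
                int desc_size  = indirect ? KLM_SIZE_MODEL : MTT_SIZE_MODEL;
                int page_align = UMR_MTT_ALIGNMENT_MODEL / desc_size;
                int page_mask  = page_align - 1;

                printf("%s: desc_size=%d page_align=%d page_mask=0x%x\n",
                       indirect ? "KLM" : "MTT", desc_size, page_align, page_mask);
        }
        return 0;
}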
@@ -1186,6 +1194,18 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (!start && length == U64_MAX) {
+		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
+		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+			return ERR_PTR(-EINVAL);
+
+		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+		return &mr->ibmr;
+	}
+#endif
+
 	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			  &page_shift, &ncont, &order);
 
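This is the branch that makes implicit ODP visible to applications: a registration with start == 0 and length == U64_MAX becomes a whole-address-space MR. From userspace the usual way to reach it is ibv_reg_mr() with a NULL address, SIZE_MAX length and IBV_ACCESS_ON_DEMAND. The sketch below is a hedged example, not part of the patch; reg_implicit_odp is a hypothetical helper name, and newer libibverbs also exposes an implicit-ODP capability bit that could be checked in addition to IBV_ODP_SUPPORT.

#include <infiniband/verbs.h>
#include <stdint.h>
#include <stddef.h>

/* Sketch: register an implicit ODP MR covering the whole address space,
 * roughly the userspace counterpart of the new kernel branch. */
static struct ibv_mr *reg_implicit_odp(struct ibv_context *ctx, struct ibv_pd *pd)
{
        struct ibv_device_attr_ex attr;

        if (ibv_query_device_ex(ctx, NULL, &attr))
                return NULL;
        if (!(attr.odp_caps.general_caps & IBV_ODP_SUPPORT))
                return NULL;    /* provider has no ODP support at all */

        /* addr == NULL and length == SIZE_MAX reach the kernel as
         * start == 0, length == U64_MAX in mlx5_ib_reg_user_mr(). */
        return ibv_reg_mr(pd, NULL, SIZE_MAX,
                          IBV_ACCESS_ON_DEMAND | IBV_ACCESS_LOCAL_WRITE);
}

With such an MR, any address the process may legitimately touch can be used as a local buffer without registering each region separately; pages are brought in by the ODP page-fault path on first access.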
@@ -1471,8 +1491,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
 		/* Wait for all running page-fault handlers to finish. */
 		synchronize_srcu(&dev->mr_srcu);
 		/* Destroy all page mappings */
-		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
-					 ib_umem_end(umem));
+		if (umem->odp_data->page_list)
+			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+						 ib_umem_end(umem));
+		else
+			mlx5_ib_free_implicit_mr(mr);
 		/*
 		 * We kill the umem before the MR for ODP,
 		 * so that there will not be any invalidations in
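Not from the patch: a minimal model of the teardown split this hunk introduces, assuming that only the implicit MR's parent umem lacks a page_list of its own, so that case is routed to mlx5_ib_free_implicit_mr(), which presumably releases the child MKeys along with the parent. All names ending in _model are hypothetical.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for the piece of the ODP umem we dispatch on. */
struct odp_umem_model {
        void **page_list;
};

static void dereg_odp_model(struct odp_umem_model *u)
{
        if (u->page_list)
                printf("explicit ODP MR: invalidate its mapped range\n");
        else
                printf("implicit MR parent: free the whole MR tree\n");
}

int main(void)
{
        struct odp_umem_model implicit = { .page_list = NULL };
        struct odp_umem_model regular  = { .page_list = (void **)&implicit }; /* any non-NULL list */

        dereg_odp_model(&regular);
        dereg_odp_model(&implicit);
        return 0;
}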