LCOV - code coverage report
Current view: top level - spdk/test/unit/lib/nvmf/rdma.c - rdma_ut.c (source / functions) Hit Total Coverage
Test: Combined Lines: 1038 1082 95.9 %
Date: 2024-11-17 13:58:19 Functions: 23 41 56.1 %
Legend: Lines: hit not hit | Branches: + taken - not taken # not executed Branches: 2243 4468 50.2 %

           Branch data     Line data    Source code
       1                 :            : /*   SPDX-License-Identifier: BSD-3-Clause
       2                 :            :  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
       3                 :            :  *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
       4                 :            :  *   Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       5                 :            :  */
       6                 :            : 
       7                 :            : #include "spdk/stdinc.h"
       8                 :            : #include "spdk_internal/cunit.h"
       9                 :            : #include "common/lib/test_env.c"
      10                 :            : #include "common/lib/test_iobuf.c"
      11                 :            : #include "common/lib/test_rdma.c"
      12                 :            : #include "nvmf/rdma.c"
      13                 :            : #include "nvmf/transport.c"
      14                 :            : 
      15                 :            : #define RDMA_UT_UNITS_IN_MAX_IO 16
      16                 :            : 
      17                 :            : struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
      18                 :            :         .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
      19                 :            :         .max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
      20                 :            :         .in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
      21                 :            :         .max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
      22                 :            :         .io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
      23                 :            :         .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
      24                 :            :         .num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
      25                 :            : };
      26                 :            : 
      27                 :          6 : SPDK_LOG_REGISTER_COMPONENT(nvmf)
      28                 :          0 : DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
      29                 :            :                 uint64_t size, uint64_t translation), 0);
      30                 :          0 : DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
      31                 :            :                 uint64_t size), 0);
      32                 :          0 : DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
      33                 :            :                 const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
      34                 :          0 : DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
      35                 :            :                 nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
      36                 :          0 : DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
      37                 :            :             (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
      38                 :          0 : DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
      39                 :            : 
      40                 :         30 : DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
      41                 :          0 : DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
      42                 :          0 : DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
      43                 :            :                 const struct spdk_nvme_transport_id *trid2), 0);
      44                 :          0 : DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
      45         [ +  + ]:         36 : DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
      46                 :            :                 struct spdk_dif_ctx *dif_ctx), false);
      47                 :          0 : DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
      48                 :            :                 enum spdk_nvme_transport_type trtype));
      49                 :          0 : DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
      50                 :          0 : DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
      51                 :          0 : DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
      52                 :          0 : DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
      53                 :         12 : DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
      54                 :          0 : DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);
      55                 :            : 
      56                 :            : /* ibv_reg_mr can be a macro, need to undefine it */
      57                 :            : #ifdef ibv_reg_mr
      58                 :            : #undef ibv_reg_mr
      59                 :            : #endif
      60                 :            : 
      61                 :            : DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
      62                 :            : struct ibv_mr *
      63                 :          0 : ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
      64                 :            : {
      65   [ #  #  #  # ]:          0 :         HANDLE_RETURN_MOCK(ibv_reg_mr);
      66         [ #  # ]:          0 :         if (length > 0) {
      67                 :          0 :                 return &g_rdma_mr;
      68                 :            :         } else {
      69                 :          0 :                 return NULL;
      70                 :            :         }
      71                 :          0 : }
      72                 :            : 
      73                 :            : int
      74                 :          3 : ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
      75                 :            :              int attr_mask, struct ibv_qp_init_attr *init_attr)
      76                 :            : {
      77         [ +  + ]:         18 :         if (qp == NULL) {
      78                 :          6 :                 return -1;
      79                 :            :         } else {
      80   [ +  -  +  - ]:         12 :                 attr->port_num = 80;
      81                 :            : 
      82   [ +  +  +  -  :         12 :                 if (qp->state == IBV_QPS_ERR) {
                   +  + ]
      83   [ +  -  +  - ]:          6 :                         attr->qp_state = 10;
      84                 :          1 :                 } else {
      85   [ +  -  +  - ]:          6 :                         attr->qp_state = IBV_QPS_INIT;
      86                 :            :                 }
      87                 :            : 
      88                 :         12 :                 return 0;
      89                 :            :         }
      90                 :          3 : }
      91                 :            : 
      92                 :            : const char *
      93                 :          0 : spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
      94                 :            : {
      95   [ #  #  #  # ]:          0 :         switch (trtype) {
      96                 :          0 :         case SPDK_NVME_TRANSPORT_PCIE:
      97                 :          0 :                 return "PCIe";
      98                 :          0 :         case SPDK_NVME_TRANSPORT_RDMA:
      99                 :          0 :                 return "RDMA";
     100                 :          0 :         case SPDK_NVME_TRANSPORT_FC:
     101                 :          0 :                 return "FC";
     102                 :          0 :         default:
     103                 :          0 :                 return NULL;
     104                 :            :         }
     105                 :          0 : }
     106                 :            : 
     107                 :            : int
     108                 :          0 : spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
     109                 :            : {
     110                 :          0 :         int len, i;
     111                 :            : 
     112         [ #  # ]:          0 :         if (trstring == NULL) {
     113                 :          0 :                 return -EINVAL;
     114                 :            :         }
     115                 :            : 
     116         [ #  # ]:          0 :         len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
     117         [ #  # ]:          0 :         if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
     118                 :          0 :                 return -EINVAL;
     119                 :            :         }
     120                 :            : 
     121                 :            :         /* cast official trstring to uppercase version of input. */
     122   [ #  #  #  # ]:          0 :         for (i = 0; i < len; i++) {
     123   [ #  #  #  #  :          0 :                 trid->trstring[i] = toupper(trstring[i]);
          #  #  #  #  #  
                #  #  # ]
     124                 :          0 :         }
     125                 :          0 :         return 0;
     126                 :          0 : }
     127                 :            : 
     128                 :            : static void
     129                 :        132 : reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
     130                 :            : {
     131                 :         22 :         int i;
     132                 :            : 
     133   [ +  -  +  -  :        132 :         rdma_req->req.length = 0;
                   +  - ]
     134   [ +  -  +  -  :        132 :         rdma_req->req.data_from_pool = false;
             +  -  +  - ]
     135   [ +  -  +  -  :        132 :         rdma_req->data.wr.num_sge = 0;
             +  -  +  - ]
     136   [ +  -  +  -  :        132 :         rdma_req->data.wr.wr.rdma.remote_addr = 0;
          +  -  +  -  +  
                -  +  - ]
     137   [ +  -  +  -  :        132 :         rdma_req->data.wr.wr.rdma.rkey = 0;
          +  -  +  -  +  
                -  +  - ]
     138   [ +  -  +  - ]:        132 :         rdma_req->offset = 0;
     139   [ +  +  +  -  :        132 :         memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
                   +  - ]
     140                 :            : 
     141   [ +  +  +  - ]:       2244 :         for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
     142   [ +  -  +  -  :       2112 :                 rdma_req->req.iov[i].iov_base = 0;
          +  -  +  -  +  
                -  +  - ]
     143   [ +  -  +  -  :       2112 :                 rdma_req->req.iov[i].iov_len = 0;
          +  -  +  -  +  
                -  +  - ]
     144   [ +  -  +  -  :       2112 :                 rdma_req->data.wr.sg_list[i].addr = 0;
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     145   [ +  -  +  -  :       2112 :                 rdma_req->data.wr.sg_list[i].length = 0;
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     146   [ +  -  +  -  :       2112 :                 rdma_req->data.wr.sg_list[i].lkey = 0;
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     147                 :        352 :         }
     148   [ +  -  +  -  :        132 :         rdma_req->req.iovcnt = 0;
                   +  - ]
     149   [ +  +  +  -  :        132 :         if (rdma_req->req.stripped_data) {
             +  -  +  + ]
     150   [ +  -  +  -  :         24 :                 free(rdma_req->req.stripped_data);
                   +  - ]
     151   [ +  -  +  -  :         24 :                 rdma_req->req.stripped_data = NULL;
                   +  - ]
     152                 :          4 :         }
     153                 :        132 : }
     154                 :            : 
     155                 :            : static void
     156                 :          6 : test_spdk_nvmf_rdma_request_parse_sgl(void)
     157                 :            : {
     158                 :          5 :         struct spdk_nvmf_rdma_transport rtransport;
     159                 :          5 :         struct spdk_nvmf_rdma_device device;
     160                 :          6 :         struct spdk_nvmf_rdma_request rdma_req = {};
     161                 :          5 :         struct spdk_nvmf_rdma_recv recv;
     162                 :          5 :         struct spdk_nvmf_rdma_poll_group group;
     163                 :          5 :         struct spdk_nvmf_rdma_qpair rqpair;
     164                 :          5 :         struct spdk_nvmf_rdma_poller poller;
     165                 :          5 :         union nvmf_c2h_msg cpl;
     166                 :          5 :         union nvmf_h2c_msg cmd;
     167                 :          1 :         struct spdk_nvme_sgl_descriptor *sgl;
     168                 :          6 :         struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
     169                 :          5 :         struct spdk_nvmf_rdma_request_data data;
     170                 :          1 :         int rc, i;
     171                 :          1 :         uint32_t sgl_length;
     172                 :            : 
     173         [ +  - ]:          6 :         data.wr.sg_list = data.sgl;
     174                 :          6 :         group.group.transport = &rtransport.transport;
     175         [ +  - ]:          6 :         poller.group = &group;
     176         [ +  - ]:          6 :         rqpair.poller = &poller;
     177         [ +  - ]:          6 :         rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
     178                 :            : 
     179         [ +  - ]:          6 :         sgl = &cmd.nvme_cmd.dptr.sgl1;
     180         [ +  - ]:          6 :         rdma_req.recv = &recv;
     181         [ +  - ]:          6 :         rdma_req.req.cmd = &cmd;
     182         [ +  - ]:          6 :         rdma_req.req.rsp = &cpl;
     183   [ +  -  +  -  :          6 :         rdma_req.data.wr.sg_list = rdma_req.data.sgl;
             +  -  +  - ]
     184                 :          6 :         rdma_req.req.qpair = &rqpair.qpair;
     185         [ +  - ]:          6 :         rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
     186                 :            : 
     187                 :          6 :         rtransport.transport.opts = g_rdma_ut_transport_opts;
     188         [ +  - ]:          6 :         rtransport.data_wr_pool = NULL;
     189                 :            : 
     190         [ +  - ]:          6 :         device.attr.device_cap_flags = 0;
     191   [ +  -  +  -  :          6 :         sgl->keyed.key = 0xEEEE;
                   +  - ]
     192   [ +  -  +  - ]:          6 :         sgl->address = 0xFFFF;
     193   [ +  -  +  -  :          6 :         rdma_req.recv->buf = (void *)0xDDDD;
                   +  - ]
     194                 :            : 
     195                 :            :         /* Test 1: sgl type: keyed data block subtype: address */
     196   [ +  -  +  -  :          6 :         sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                   +  - ]
     197   [ +  -  +  -  :          6 :         sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                   +  - ]
     198                 :            : 
     199                 :            :         /* Part 1: simple I/O, one SGL smaller than the transport io unit size */
     200                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
     201                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     202   [ +  -  +  -  :          6 :         sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
          +  -  +  -  +  
                -  +  - ]
     203                 :            : 
     204         [ +  - ]:          6 :         device.map = (void *)0x0;
     205                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     206                 :          6 :         CU_ASSERT(rc == 0);
     207   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     208   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
             +  -  +  - ]
     209         [ +  - ]:          6 :         CU_ASSERT((uint64_t)rdma_req.req.iovcnt == 1);
     210   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     211   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
     212   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
     213   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
     214   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     215   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
     216   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
     217   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     218                 :            : 
     219                 :            :         /* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
     220                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     221   [ +  -  +  -  :          6 :         sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
          +  -  +  -  +  
                      - ]
     222                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     223                 :            : 
     224                 :          6 :         CU_ASSERT(rc == 0);
     225   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     226   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
                   +  - ]
     227   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
                   +  - ]
     228   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
     229   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
     230   [ +  +  +  - ]:        102 :         for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
     231   [ +  -  +  -  :         96 :                 CU_ASSERT((uint64_t)rdma_req.req.iov[i].iov_base == 0x2000);
             +  -  +  - ]
     232   [ +  -  +  -  :         96 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
     233   [ +  -  +  -  :         96 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     234   [ +  -  +  -  :         96 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     235                 :         16 :         }
     236                 :            : 
     237                 :            :         /* Part 3: simple I/O one SGL larger than the transport max io size */
     238                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     239   [ +  -  +  -  :          6 :         sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
          +  -  +  -  +  
                      - ]
     240                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     241                 :            : 
     242                 :          6 :         CU_ASSERT(rc == -1);
     243                 :            : 
     244                 :            :         /* Part 4: Pretend there are no buffer pools */
     245                 :          6 :         MOCK_SET(spdk_iobuf_get, NULL);
     246                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     247   [ +  -  +  -  :          6 :         sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
          +  -  +  -  +  
                      - ]
     248                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     249                 :            : 
     250                 :          6 :         CU_ASSERT(rc == 0);
     251   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == false);
     252         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 0);
     253   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 0);
                   +  - ]
     254   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[0].iov_base == NULL);
             +  -  +  - ]
     255   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
          +  -  +  -  +  
                -  +  - ]
     256   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
          +  -  +  -  +  
                -  +  - ]
     257   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
          +  -  +  -  +  
                -  +  - ]
     258                 :            : 
     259   [ +  -  +  -  :          6 :         rdma_req.recv->buf = (void *)0xDDDD;
                   +  - ]
     260                 :            :         /* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
     261   [ +  -  +  -  :          6 :         sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
                   +  - ]
     262   [ +  -  +  -  :          6 :         sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
                   +  - ]
     263                 :            : 
     264                 :            :         /* Part 1: Normal I/O smaller than in capsule data size no offset */
     265                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     266   [ +  -  +  - ]:          6 :         sgl->address = 0;
     267   [ +  -  +  -  :          6 :         sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
          +  -  +  -  +  
                -  +  - ]
     268                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     269                 :            : 
     270                 :          6 :         CU_ASSERT(rc == 0);
     271         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 1);
     272   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)0xDDDD);
             +  -  +  - ]
     273   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
                   +  - ]
     274   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == false);
     275                 :            : 
     276                 :            :         /* Part 2: I/O offset + length too large */
     277                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     278   [ +  -  +  -  :          6 :         sgl->address = rtransport.transport.opts.in_capsule_data_size;
             +  -  +  - ]
     279   [ +  -  +  -  :          6 :         sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
          +  -  +  -  +  
                -  +  - ]
     280                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     281                 :            : 
     282                 :          6 :         CU_ASSERT(rc == -1);
     283                 :            : 
     284                 :            :         /* Part 3: I/O too large */
     285                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     286   [ +  -  +  - ]:          6 :         sgl->address = 0;
     287   [ +  -  +  -  :          6 :         sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
          +  -  +  -  +  
                -  +  - ]
     288                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     289                 :            : 
     290                 :          6 :         CU_ASSERT(rc == -1);
     291                 :            : 
     292                 :            :         /* Test 3: Multi SGL */
     293   [ +  -  +  -  :          6 :         sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
                   +  - ]
     294   [ +  -  +  -  :          6 :         sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
                   +  - ]
     295   [ +  -  +  - ]:          6 :         sgl->address = 0;
     296   [ +  -  +  -  :          6 :         rdma_req.recv->buf = (void *)&sgl_desc;
                   +  - ]
     297                 :          6 :         MOCK_SET(spdk_iobuf_get, &data);
     298                 :          6 :         MOCK_SET(spdk_mempool_get, &data);
     299                 :            : 
     300                 :            :         /* part 1: 2 segments each with 1 wr. */
     301                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     302   [ +  -  +  -  :          6 :         sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
             +  -  +  - ]
     303   [ +  +  +  - ]:         18 :         for (i = 0; i < 2; i++) {
     304   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
          +  -  +  -  +  
                      - ]
     305   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
          +  -  +  -  +  
                      - ]
     306   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     307   [ +  -  +  -  :         12 :                 sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
          +  -  +  -  +  
                -  +  - ]
     308   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.key = 0x44;
          +  -  +  -  +  
                      - ]
     309                 :          2 :         }
     310                 :            : 
     311                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     312                 :            : 
     313                 :          6 :         CU_ASSERT(rc == 0);
     314   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     315   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
                   +  - ]
     316   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
     317   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
          +  -  +  -  +  
                      - ]
     318   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
          +  -  +  -  +  
                      - ]
     319   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next == &data.wr);
                   +  - ]
     320   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
                   +  - ]
     321   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
          +  -  +  -  +  
                      - ]
     322         [ +  - ]:          6 :         CU_ASSERT(data.wr.num_sge == 1);
     323   [ +  -  +  - ]:          6 :         CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
     324                 :            : 
     325                 :            :         /* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
     326                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     327   [ +  -  +  -  :          6 :         sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
             +  -  +  - ]
     328   [ +  +  +  - ]:         18 :         for (i = 0; i < 2; i++) {
     329   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
          +  -  +  -  +  
                      - ]
     330   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
          +  -  +  -  +  
                      - ]
     331   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     332   [ +  -  +  -  :         12 :                 sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     333   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.key = 0x44;
          +  -  +  -  +  
                      - ]
     334                 :          2 :         }
     335                 :            : 
     336                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     337                 :            : 
     338                 :          6 :         CU_ASSERT(rc == 0);
     339   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     340   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
                   +  - ]
     341         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 16);
     342   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 8);
                   +  - ]
     343   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
          +  -  +  -  +  
                      - ]
     344   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
          +  -  +  -  +  
                      - ]
     345   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next == &data.wr);
                   +  - ]
     346   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
                   +  - ]
     347   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
          +  -  +  -  +  
                      - ]
     348         [ +  - ]:          6 :         CU_ASSERT(data.wr.num_sge == 8);
     349   [ +  -  +  - ]:          6 :         CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
     350                 :            : 
     351                 :            :         /* part 3: 2 segments, one very large, one very small */
     352                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     353   [ +  +  +  - ]:         18 :         for (i = 0; i < 2; i++) {
     354   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
          +  -  +  -  +  
                      - ]
     355   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
          +  -  +  -  +  
                      - ]
     356   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.key = 0x44;
          +  -  +  -  +  
                      - ]
     357                 :          2 :         }
     358                 :            : 
     359   [ +  -  +  -  :          7 :         sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
          +  -  +  -  +  
                -  +  - ]
     360   [ +  -  +  -  :          6 :                                    rtransport.transport.opts.io_unit_size / 2;
                   +  - ]
     361   [ +  -  +  - ]:          6 :         sgl_desc[0].address = 0x4000;
     362   [ +  -  +  -  :          6 :         sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     363   [ +  -  +  -  :          7 :         sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
          +  -  +  -  +  
                -  +  - ]
     364   [ +  -  +  -  :          6 :                               rtransport.transport.opts.io_unit_size / 2;
                   +  - ]
     365                 :            : 
     366                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     367                 :            : 
     368                 :          6 :         CU_ASSERT(rc == 0);
     369   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     370   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
                   +  - ]
     371         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 16);
     372   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 16);
                   +  - ]
     373   [ +  +  +  - ]:         96 :         for (i = 0; i < 15; i++) {
     374   [ +  -  +  -  :         90 :                 CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
          +  -  +  -  +  
                -  +  - ]
     375                 :         15 :         }
     376   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     377   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
          +  -  +  -  +  
                      - ]
     378   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
          +  -  +  -  +  
                      - ]
     379   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next == &data.wr);
                   +  - ]
     380   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
                   +  - ]
     381   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     382                 :            :                   rtransport.transport.opts.io_unit_size / 2);
     383   [ +  -  +  -  :          6 :         CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
          +  -  +  -  +  
                -  +  - ]
     384         [ +  - ]:          6 :         CU_ASSERT(data.wr.num_sge == 1);
     385   [ +  -  +  - ]:          6 :         CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
     386                 :            : 
      387                 :            :         /* part 4: 2 SGL descriptors, each of length transport buffer / 2;
      388                 :            :          * 1 transport buffer should be allocated */
     389                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     390   [ +  -  +  -  :          6 :         sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
             +  -  +  - ]
     391   [ +  -  +  -  :          6 :         sgl_length = rtransport.transport.opts.io_unit_size / 2;
                   +  - ]
     392   [ +  +  +  - ]:         18 :         for (i = 0; i < 2; i++) {
     393   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.length = sgl_length;
          +  -  +  -  +  
                      - ]
     394   [ +  -  +  -  :         12 :                 sgl_desc[i].address = 0x4000 + i * sgl_length;
             +  -  +  - ]
     395                 :          2 :         }
     396                 :            : 
     397                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     398                 :            : 
     399                 :          6 :         CU_ASSERT(rc == 0);
     400   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     401   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
                   +  - ]
     402         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 1);
     403                 :            : 
     404   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
             +  -  +  - ]
     405                 :            :         /* We mocked mempool_get to return address of data variable. Mempool is used
     406                 :            :          * to get both additional WRs and data buffers, so data points to &data */
     407   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.sgl[0].addr == (uint64_t)&data);
             +  -  +  - ]
     408   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
          +  -  +  -  +  
                      - ]
     409   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
          +  -  +  -  +  
                      - ]
     410   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
     411   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next == &data.wr);
                   +  - ]
     412                 :            : 
     413   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
                   +  - ]
     414   [ +  -  +  -  :          6 :         CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
                   +  - ]
     415   [ +  -  +  -  :          6 :         CU_ASSERT(data.sgl[0].length == sgl_length);
                   +  - ]
     416   [ +  -  +  -  :          6 :         CU_ASSERT(data.sgl[0].addr == (uint64_t)&data + sgl_length);
                   +  - ]
     417         [ +  - ]:          6 :         CU_ASSERT(data.wr.num_sge == 1);
     418                 :            : 
     419                 :          6 :         MOCK_CLEAR(spdk_mempool_get);
     420                 :          6 :         MOCK_CLEAR(spdk_iobuf_get);
     421                 :            : 
     422                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     423                 :          6 : }
     424                 :            : 
     425                 :            : static struct spdk_nvmf_rdma_recv *
     426                 :         36 : create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
     427                 :            : {
     428                 :          6 :         struct spdk_nvmf_rdma_recv *rdma_recv;
     429                 :          6 :         union nvmf_h2c_msg *cmd;
     430                 :          6 :         struct spdk_nvme_sgl_descriptor *sgl;
     431                 :            : 
     432                 :         36 :         rdma_recv = calloc(1, sizeof(*rdma_recv));
     433   [ +  -  +  - ]:         36 :         rdma_recv->qpair = rqpair;
     434                 :         36 :         cmd = calloc(1, sizeof(*cmd));
     435   [ +  -  +  -  :         36 :         rdma_recv->sgl[0].addr = (uintptr_t)cmd;
          +  -  +  -  +  
                      - ]
     436   [ +  -  +  - ]:         36 :         cmd->nvme_cmd.opc = opc;
     437   [ +  -  +  -  :         36 :         sgl = &cmd->nvme_cmd.dptr.sgl1;
                   +  - ]
     438   [ +  -  +  -  :         36 :         sgl->keyed.key = 0xEEEE;
                   +  - ]
     439   [ +  -  +  - ]:         36 :         sgl->address = 0xFFFF;
     440   [ +  -  +  -  :         36 :         sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                   +  - ]
     441   [ +  -  +  -  :         36 :         sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                   +  - ]
     442   [ +  -  +  -  :         36 :         sgl->keyed.length = 1;
                   +  - ]
     443                 :            : 
     444                 :         42 :         return rdma_recv;
     445                 :          6 : }
     446                 :            : 
     447                 :            : static void
     448                 :         36 : free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
     449                 :            : {
     450   [ +  -  +  -  :         36 :         free((void *)rdma_recv->sgl[0].addr);
          +  -  +  -  +  
                      - ]
     451                 :         36 :         free(rdma_recv);
     452                 :         36 : }
     453                 :            : 
     454                 :            : static struct spdk_nvmf_rdma_request *
     455                 :         36 : create_req(struct spdk_nvmf_rdma_qpair *rqpair,
     456                 :            :            struct spdk_nvmf_rdma_recv *rdma_recv)
     457                 :            : {
     458                 :          6 :         struct spdk_nvmf_rdma_request *rdma_req;
     459                 :          6 :         union nvmf_c2h_msg *cpl;
     460                 :            : 
     461                 :         36 :         rdma_req = calloc(1, sizeof(*rdma_req));
     462   [ +  -  +  - ]:         36 :         rdma_req->recv = rdma_recv;
     463   [ +  -  +  -  :         36 :         rdma_req->req.qpair = &rqpair->qpair;
             +  -  +  - ]
     464   [ +  -  +  - ]:         36 :         rdma_req->state = RDMA_REQUEST_STATE_NEW;
     465   [ +  -  +  -  :         36 :         rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data_wr;
          +  -  +  -  +  
                      - ]
     466   [ +  -  +  -  :         36 :         rdma_req->data.wr.sg_list = rdma_req->data.sgl;
          +  -  +  -  +  
                -  +  - ]
     467                 :         36 :         cpl = calloc(1, sizeof(*cpl));
     468   [ +  -  +  -  :         36 :         rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
          +  -  +  -  +  
                      - ]
     469   [ +  -  +  -  :         36 :         rdma_req->req.rsp = cpl;
                   +  - ]
     470                 :            : 
     471                 :         42 :         return rdma_req;
     472                 :          6 : }
     473                 :            : 
     474                 :            : static void
     475                 :         36 : free_req(struct spdk_nvmf_rdma_request *rdma_req)
     476                 :            : {
     477   [ +  -  +  -  :         36 :         free((void *)rdma_req->rsp.sgl[0].addr);
          +  -  +  -  +  
                      - ]
     478                 :         36 :         free(rdma_req);
     479                 :         36 : }
     480                 :            : 
     481                 :            : static void
     482                 :         36 : qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
     483                 :            :             struct spdk_nvmf_rdma_poller *poller,
     484                 :            :             struct spdk_nvmf_rdma_device *device,
     485                 :            :             struct spdk_nvmf_rdma_resources *resources,
     486                 :            :             struct spdk_nvmf_transport *transport)
     487                 :            : {
     488         [ +  + ]:         36 :         memset(rqpair, 0, sizeof(*rqpair));
     489   [ +  -  +  -  :         36 :         STAILQ_INIT(&rqpair->pending_rdma_write_queue);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     490   [ +  -  +  -  :         36 :         STAILQ_INIT(&rqpair->pending_rdma_read_queue);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     491   [ +  -  +  -  :         36 :         STAILQ_INIT(&rqpair->pending_rdma_send_queue);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     492   [ +  -  +  - ]:         36 :         rqpair->poller = poller;
     493   [ +  -  +  - ]:         36 :         rqpair->device = device;
     494   [ +  -  +  - ]:         36 :         rqpair->resources = resources;
     495   [ +  -  +  -  :         36 :         rqpair->qpair.qid = 1;
                   +  - ]
     496   [ +  -  +  - ]:         36 :         rqpair->ibv_state = IBV_QPS_RTS;
     497   [ +  -  +  -  :         36 :         rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
                   +  - ]
     498   [ +  -  +  - ]:         36 :         rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
     499   [ +  -  +  - ]:         36 :         rqpair->max_send_depth = 16;
     500   [ +  -  +  - ]:         36 :         rqpair->max_read_depth = 16;
     501   [ +  -  +  -  :         36 :         rqpair->qpair.transport = transport;
                   +  - ]
     502                 :         36 : }
     503                 :            : 
     504                 :            : static void
     505                 :         36 : poller_reset(struct spdk_nvmf_rdma_poller *poller,
     506                 :            :              struct spdk_nvmf_rdma_poll_group *group)
     507                 :            : {
     508         [ +  + ]:         36 :         memset(poller, 0, sizeof(*poller));
     509   [ +  -  +  -  :         36 :         STAILQ_INIT(&poller->qpairs_pending_recv);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     510   [ +  -  +  -  :         36 :         STAILQ_INIT(&poller->qpairs_pending_send);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     511   [ +  -  +  - ]:         36 :         poller->group = group;
     512                 :         36 : }
     513                 :            : 
     514                 :            : static void
     515                 :          6 : test_spdk_nvmf_rdma_request_process(void)
     516                 :            : {
     517                 :          6 :         struct spdk_nvmf_rdma_transport rtransport = {};
     518                 :          6 :         struct spdk_nvmf_rdma_poll_group group = {};
     519                 :          6 :         struct spdk_nvmf_rdma_poller poller = {};
     520                 :          6 :         struct spdk_nvmf_rdma_device device = {};
     521                 :          6 :         struct spdk_nvmf_rdma_resources resources = {};
     522                 :          6 :         struct spdk_nvmf_rdma_qpair rqpair = {};
     523                 :          1 :         struct spdk_nvmf_rdma_recv *rdma_recv;
     524                 :          1 :         struct spdk_nvmf_rdma_request *rdma_req;
     525                 :          6 :         struct spdk_iobuf_channel ch = {};
     526                 :          1 :         bool progress;
     527                 :            : 
     528         [ +  - ]:          6 :         group.group.buf_cache = &ch;
     529                 :            : 
     530   [ +  -  +  -  :          6 :         STAILQ_INIT(&group.group.pending_buf_queue);
          +  -  +  -  +  
                      - ]
     531                 :          6 :         poller_reset(&poller, &group);
     532                 :          6 :         qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
     533                 :            : 
     534                 :          6 :         rtransport.transport.opts = g_rdma_ut_transport_opts;
     535         [ +  - ]:          6 :         rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
     536                 :            :                                   sizeof(struct spdk_nvmf_rdma_request_data),
     537                 :            :                                   0, 0);
     538                 :          6 :         MOCK_CLEAR(spdk_iobuf_get);
     539                 :            : 
     540         [ +  - ]:          6 :         device.attr.device_cap_flags = 0;
     541         [ +  - ]:          6 :         device.map = (void *)0x0;
     542                 :            : 
     543                 :            :         /* Test 1: single SGL READ request */
     544                 :          6 :         rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
     545                 :          6 :         rdma_req = create_req(&rqpair, rdma_recv);
     546         [ +  - ]:          6 :         rqpair.current_recv_depth = 1;
     547                 :            :         /* NEW -> EXECUTING */
     548                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     549         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     550   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
     551   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
                   +  - ]
     552                 :            :         /* EXECUTED -> TRANSFERRING_C2H */
     553   [ +  -  +  - ]:          6 :         rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
     554                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     555         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     556   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
     557   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->recv == NULL);
     558                 :            :         /* COMPLETED -> FREE */
     559   [ +  -  +  - ]:          6 :         rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
     560                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     561         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     562   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
     563                 :            : 
     564                 :          6 :         free_recv(rdma_recv);
     565                 :          6 :         free_req(rdma_req);
     566                 :          6 :         poller_reset(&poller, &group);
     567                 :          6 :         qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
     568                 :            : 
     569                 :            :         /* Test 2: single SGL WRITE request */
     570                 :          6 :         rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
     571                 :          6 :         rdma_req = create_req(&rqpair, rdma_recv);
     572         [ +  - ]:          6 :         rqpair.current_recv_depth = 1;
     573                 :            :         /* NEW -> TRANSFERRING_H2C */
     574                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     575         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     576   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
     577   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
                   +  - ]
     578   [ +  -  +  -  :          6 :         STAILQ_INIT(&poller.qpairs_pending_send);
          +  -  +  -  +  
                      - ]
     579                 :            :         /* READY_TO_EXECUTE -> EXECUTING */
     580   [ +  -  +  - ]:          6 :         rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
     581                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     582         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     583   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
     584                 :            :         /* EXECUTED -> COMPLETING */
     585   [ +  -  +  - ]:          6 :         rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
     586                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     587         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     588   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
     589   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->recv == NULL);
     590                 :            :         /* COMPLETED -> FREE */
     591   [ +  -  +  - ]:          6 :         rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
     592                 :          6 :         progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     593         [ +  - ]:          6 :         CU_ASSERT(progress == true);
     594   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
     595                 :            : 
     596                 :          6 :         free_recv(rdma_recv);
     597                 :          6 :         free_req(rdma_req);
     598                 :          6 :         poller_reset(&poller, &group);
     599                 :          6 :         qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
     600                 :            : 
     601                 :            :         /* Test 3: WRITE+WRITE ibv_send batching */
     602                 :            :         {
     603                 :          1 :                 struct spdk_nvmf_rdma_recv *recv1, *recv2;
     604                 :          1 :                 struct spdk_nvmf_rdma_request *req1, *req2;
     605                 :          6 :                 recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
     606                 :          6 :                 req1 = create_req(&rqpair, recv1);
     607                 :          6 :                 recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
     608                 :          6 :                 req2 = create_req(&rqpair, recv2);
     609                 :            : 
     610                 :            :                 /* WRITE 1: NEW -> TRANSFERRING_H2C */
     611         [ +  - ]:          6 :                 rqpair.current_recv_depth = 1;
     612                 :          6 :                 nvmf_rdma_request_process(&rtransport, req1);
     613   [ +  -  +  - ]:          6 :                 CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
     614                 :            : 
     615                 :            :                 /* WRITE 2: NEW -> TRANSFERRING_H2C */
     616         [ +  - ]:          6 :                 rqpair.current_recv_depth = 2;
     617                 :          6 :                 nvmf_rdma_request_process(&rtransport, req2);
     618   [ +  -  +  - ]:          6 :                 CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
     619                 :            : 
     620   [ +  -  +  -  :          6 :                 STAILQ_INIT(&poller.qpairs_pending_send);
          +  -  +  -  +  
                      - ]
     621                 :            : 
     622                 :            :                 /* WRITE 1 completes before WRITE 2 has finished RDMA reading */
     623                 :            :                 /* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
     624   [ +  -  +  - ]:          6 :                 req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
     625                 :          6 :                 nvmf_rdma_request_process(&rtransport, req1);
     626   [ +  -  +  - ]:          6 :                 CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
     627                 :            :                 /* WRITE 1: EXECUTED -> COMPLETING */
     628   [ +  -  +  - ]:          6 :                 req1->state = RDMA_REQUEST_STATE_EXECUTED;
     629                 :          6 :                 nvmf_rdma_request_process(&rtransport, req1);
     630   [ +  -  +  - ]:          6 :                 CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
     631   [ +  -  +  -  :          6 :                 STAILQ_INIT(&poller.qpairs_pending_send);
          +  -  +  -  +  
                      - ]
     632                 :            :                 /* WRITE 1: COMPLETED -> FREE */
     633   [ +  -  +  - ]:          6 :                 req1->state = RDMA_REQUEST_STATE_COMPLETED;
     634                 :          6 :                 nvmf_rdma_request_process(&rtransport, req1);
     635   [ +  -  +  - ]:          6 :                 CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
     636                 :            : 
     637                 :            :                 /* Now WRITE 2 has finished reading and completes */
     638                 :            :                 /* WRITE 2: COMPLETED -> FREE */
      639                 :            :                 /* WRITE 2 now runs through the remaining states to FREE */
     640   [ +  -  +  - ]:          6 :                 req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
     641                 :          6 :                 nvmf_rdma_request_process(&rtransport, req2);
     642   [ +  -  +  - ]:          6 :                 CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
      643                 :            :                 /* WRITE 2: EXECUTED -> COMPLETING */
     644   [ +  -  +  - ]:          6 :                 req2->state = RDMA_REQUEST_STATE_EXECUTED;
     645                 :          6 :                 nvmf_rdma_request_process(&rtransport, req2);
     646   [ +  -  +  - ]:          6 :                 CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
     647   [ +  -  +  -  :          6 :                 STAILQ_INIT(&poller.qpairs_pending_send);
          +  -  +  -  +  
                      - ]
      648                 :            :                 /* WRITE 2: COMPLETED -> FREE */
     649   [ +  -  +  - ]:          6 :                 req2->state = RDMA_REQUEST_STATE_COMPLETED;
     650                 :          6 :                 nvmf_rdma_request_process(&rtransport, req2);
     651   [ +  -  +  - ]:          6 :                 CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
     652                 :            : 
     653                 :          6 :                 free_recv(recv1);
     654                 :          6 :                 free_req(req1);
     655                 :          6 :                 free_recv(recv2);
     656                 :          6 :                 free_req(req2);
     657                 :          6 :                 poller_reset(&poller, &group);
     658                 :          6 :                 qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
     659                 :          1 :         }
     660                 :            : 
     661                 :            :         /* Test 4, invalid command, check xfer type */
     662                 :            :         {
     663                 :          1 :                 struct spdk_nvmf_rdma_recv *rdma_recv_inv;
     664                 :          1 :                 struct spdk_nvmf_rdma_request *rdma_req_inv;
     665                 :            :                 /* construct an opcode that specifies BIDIRECTIONAL transfer */
     666                 :          6 :                 uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
     667                 :            : 
     668                 :          6 :                 rdma_recv_inv = create_recv(&rqpair, opc);
     669                 :          6 :                 rdma_req_inv = create_req(&rqpair, rdma_recv_inv);
     670                 :            : 
     671                 :            :                 /* NEW -> RDMA_REQUEST_STATE_COMPLETING */
     672         [ +  - ]:          6 :                 rqpair.current_recv_depth = 1;
     673                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
     674         [ +  - ]:          6 :                 CU_ASSERT(progress == true);
     675   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
     676   [ +  -  +  -  :          6 :                 CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     677   [ +  -  +  -  :          6 :                 CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     678                 :            : 
     679                 :            :                 /* RDMA_REQUEST_STATE_COMPLETED -> FREE */
     680   [ +  -  +  - ]:          6 :                 rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
     681                 :          6 :                 nvmf_rdma_request_process(&rtransport, rdma_req_inv);
     682   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);
     683                 :            : 
     684                 :          6 :                 free_recv(rdma_recv_inv);
     685                 :          6 :                 free_req(rdma_req_inv);
     686                 :          6 :                 poller_reset(&poller, &group);
     687                 :          6 :                 qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
     688                 :          1 :         }
     689                 :            : 
     690                 :            :         /* Test 5: Write response waits in queue */
     691                 :            :         {
     692                 :          6 :                 rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
     693                 :          6 :                 rdma_req = create_req(&rqpair, rdma_recv);
     694         [ +  - ]:          6 :                 rqpair.current_recv_depth = 1;
     695                 :            :                 /* NEW -> TRANSFERRING_H2C */
     696                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     697         [ +  - ]:          6 :                 CU_ASSERT(progress == true);
     698   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
     699   [ +  -  +  -  :          6 :                 CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
                   +  - ]
     700   [ +  -  +  -  :          6 :                 STAILQ_INIT(&poller.qpairs_pending_send);
          +  -  +  -  +  
                      - ]
     701                 :            :                 /* READY_TO_EXECUTE -> EXECUTING */
     702   [ +  -  +  - ]:          6 :                 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
     703                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     704         [ +  - ]:          6 :                 CU_ASSERT(progress == true);
     705   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
     706                 :            :                 /* EXECUTED -> COMPLETING */
     707   [ +  -  +  - ]:          6 :                 rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
     708                 :            :                 /* Send queue is full */
     709   [ +  -  +  - ]:          6 :                 rqpair.current_send_depth = rqpair.max_send_depth;
     710                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     711         [ +  - ]:          6 :                 CU_ASSERT(progress == true);
     712   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING);
     713   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req == STAILQ_FIRST(&rqpair.pending_rdma_send_queue));
     714                 :            : 
     715                 :            :                 /* Send queue is still full */
     716                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     717         [ +  - ]:          6 :                 CU_ASSERT(progress == false);
     718   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING);
     719   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req == STAILQ_FIRST(&rqpair.pending_rdma_send_queue));
     720                 :            : 
     721                 :            :                 /* Slot is available */
     722   [ +  -  +  - ]:          6 :                 rqpair.current_send_depth = rqpair.max_send_depth - 1;
     723                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     724         [ +  - ]:          6 :                 CU_ASSERT(progress == true);
     725   [ +  -  +  - ]:          6 :                 CU_ASSERT(STAILQ_EMPTY(&rqpair.pending_rdma_send_queue));
     726   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
     727   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->recv == NULL);
     728                 :            :                 /* COMPLETED -> FREE */
     729   [ +  -  +  - ]:          6 :                 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
     730                 :          6 :                 progress = nvmf_rdma_request_process(&rtransport, rdma_req);
     731         [ +  - ]:          6 :                 CU_ASSERT(progress == true);
     732   [ +  -  +  - ]:          6 :                 CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
     733                 :            : 
     734                 :          6 :                 free_recv(rdma_recv);
     735                 :          6 :                 free_req(rdma_req);
     736                 :          6 :                 poller_reset(&poller, &group);
     737                 :          6 :                 qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
     738                 :            : 
     739                 :            :         }
     740                 :            : 
     741         [ +  - ]:          6 :         spdk_mempool_free(rtransport.data_wr_pool);
     742                 :          6 : }
     743                 :            : 
     744                 :            : #define TEST_GROUPS_COUNT 5
     745                 :            : static void
     746                 :          6 : test_nvmf_rdma_get_optimal_poll_group(void)
     747                 :            : {
     748                 :          6 :         struct spdk_nvmf_rdma_transport rtransport = {};
     749                 :          6 :         struct spdk_nvmf_transport *transport = &rtransport.transport;
     750                 :          6 :         struct spdk_nvmf_rdma_qpair rqpair = {};
     751                 :          5 :         struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
     752                 :          5 :         struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
     753                 :          1 :         struct spdk_nvmf_transport_poll_group *result;
     754                 :          6 :         struct spdk_nvmf_poll_group group = {};
     755                 :          1 :         uint32_t i;
     756                 :            : 
     757         [ +  - ]:          6 :         rqpair.qpair.transport = transport;
     758   [ +  -  +  -  :          6 :         TAILQ_INIT(&rtransport.poll_groups);
          +  -  +  -  +  
                      - ]
     759                 :            : 
     760         [ +  + ]:         36 :         for (i = 0; i < TEST_GROUPS_COUNT; i++) {
     761   [ +  -  +  -  :         30 :                 groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
                   +  - ]
     762   [ +  -  +  -  :         30 :                 CU_ASSERT(groups[i] != NULL);
                   +  - ]
     763   [ +  -  +  -  :         30 :                 groups[i]->group = &group;
          +  -  +  -  +  
                      - ]
     764   [ +  -  +  -  :         30 :                 rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
          +  -  +  -  +  
                -  +  - ]
     765   [ +  -  +  -  :         30 :                 groups[i]->transport = transport;
          +  -  +  -  +  
                      - ]
     766                 :          5 :         }
     767   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
             +  -  +  - ]
     768   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
             +  -  +  - ]
     769                 :            : 
     770                 :            :         /* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
     771         [ +  + ]:         36 :         for (i = 0; i < TEST_GROUPS_COUNT; i++) {
     772         [ +  - ]:         30 :                 rqpair.qpair.qid = 0;
     773                 :         30 :                 result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
     774   [ +  -  +  -  :         30 :                 CU_ASSERT(result == groups[i]);
                   +  - ]
     775   [ +  -  +  -  :         30 :                 CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
          +  -  +  -  +  
                -  +  - ]
     776   [ +  -  +  -  :         30 :                 CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
          +  -  +  -  +  
                      - ]
     777                 :            : 
     778         [ +  - ]:         30 :                 rqpair.qpair.qid = 1;
     779                 :         30 :                 result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
     780   [ +  -  +  -  :         30 :                 CU_ASSERT(result == groups[i]);
                   +  - ]
     781   [ +  -  +  -  :         30 :                 CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
          +  -  +  -  +  
                -  +  - ]
     782   [ +  -  +  -  :         30 :                 CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
          +  -  +  -  +  
                -  +  - ]
     783                 :          5 :         }
     784                 :            :         /* wrap around, admin/io pg point to the first pg
     785                 :            :            Destroy all poll groups except of the last one */
     786         [ +  + ]:         30 :         for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
     787   [ +  -  +  -  :         24 :                 nvmf_rdma_poll_group_destroy(groups[i]);
                   +  - ]
     788   [ +  -  +  -  :         24 :                 CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
          +  -  +  -  +  
                      - ]
     789   [ +  -  +  -  :         24 :                 CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
          +  -  +  -  +  
                      - ]
     790                 :          4 :         }
     791                 :            : 
     792   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
          +  -  +  -  +  
                      - ]
     793   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
          +  -  +  -  +  
                      - ]
     794                 :            : 
     795                 :            :         /* Check that pointers to the next admin/io poll groups are not changed */
     796         [ +  - ]:          6 :         rqpair.qpair.qid = 0;
     797                 :          6 :         result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
     798   [ +  -  +  -  :          6 :         CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
                   +  - ]
     799   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
          +  -  +  -  +  
                      - ]
     800   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
          +  -  +  -  +  
                      - ]
     801                 :            : 
     802         [ +  - ]:          6 :         rqpair.qpair.qid = 1;
     803                 :          6 :         result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
     804   [ +  -  +  -  :          6 :         CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
                   +  - ]
     805   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
          +  -  +  -  +  
                      - ]
     806   [ +  -  +  -  :          6 :         CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
          +  -  +  -  +  
                      - ]
     807                 :            : 
     808                 :            :         /* Remove the last poll group, check that pointers are NULL */
     809   [ +  -  +  -  :          6 :         nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
                   +  - ]
     810   [ +  -  +  - ]:          6 :         CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
     811   [ +  -  +  - ]:          6 :         CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
     812                 :            : 
     813                 :            :         /* Request optimal poll group, result must be NULL */
     814         [ +  - ]:          6 :         rqpair.qpair.qid = 0;
     815                 :          6 :         result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
     816                 :          6 :         CU_ASSERT(result == NULL);
     817                 :            : 
     818         [ +  - ]:          6 :         rqpair.qpair.qid = 1;
     819                 :          6 :         result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
     820                 :          6 :         CU_ASSERT(result == NULL);
     821                 :          6 : }
     822                 :            : #undef TEST_GROUPS_COUNT
     823                 :            : 
     824                 :            : static void
     825                 :          6 : test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
     826                 :            : {
     827                 :          5 :         struct spdk_nvmf_rdma_transport rtransport;
     828                 :          5 :         struct spdk_nvmf_rdma_device device;
     829                 :          6 :         struct spdk_nvmf_rdma_request rdma_req = {};
     830                 :          5 :         struct spdk_nvmf_rdma_recv recv;
     831                 :          5 :         struct spdk_nvmf_rdma_poll_group group;
     832                 :          5 :         struct spdk_nvmf_rdma_qpair rqpair;
     833                 :          5 :         struct spdk_nvmf_rdma_poller poller;
     834                 :          5 :         union nvmf_c2h_msg cpl;
     835                 :          5 :         union nvmf_h2c_msg cmd;
     836                 :          1 :         struct spdk_nvme_sgl_descriptor *sgl;
     837                 :          6 :         struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
     838                 :          5 :         char data_buffer[8192];
     839                 :          6 :         struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
     840                 :          5 :         char data2_buffer[8192];
     841                 :          6 :         struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
     842                 :          6 :         const uint32_t data_bs = 512;
     843                 :          6 :         const uint32_t md_size = 8;
     844                 :          1 :         int rc, i;
     845                 :          5 :         struct spdk_dif_ctx_init_ext_opts dif_opts;
     846                 :            : 
     847                 :          6 :         MOCK_CLEAR(spdk_mempool_get);
     848                 :          6 :         MOCK_CLEAR(spdk_iobuf_get);
     849                 :            : 
     850   [ +  -  +  -  :          6 :         data->wr.sg_list = data->sgl;
             +  -  +  - ]
     851                 :          6 :         group.group.transport = &rtransport.transport;
     852         [ +  - ]:          6 :         poller.group = &group;
     853         [ +  - ]:          6 :         rqpair.poller = &poller;
     854         [ +  - ]:          6 :         rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
     855                 :            : 
     856         [ +  - ]:          6 :         sgl = &cmd.nvme_cmd.dptr.sgl1;
     857         [ +  - ]:          6 :         rdma_req.recv = &recv;
     858         [ +  - ]:          6 :         rdma_req.req.cmd = &cmd;
     859         [ +  - ]:          6 :         rdma_req.req.rsp = &cpl;
     860   [ +  -  +  -  :          6 :         rdma_req.data.wr.sg_list = rdma_req.data.sgl;
             +  -  +  - ]
     861                 :          6 :         rdma_req.req.qpair = &rqpair.qpair;
     862         [ +  - ]:          6 :         rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
     863                 :            : 
     864                 :          6 :         rtransport.transport.opts = g_rdma_ut_transport_opts;
     865         [ +  - ]:          6 :         rtransport.data_wr_pool = NULL;
     866                 :            : 
     867         [ +  - ]:          6 :         device.attr.device_cap_flags = 0;
     868         [ +  - ]:          6 :         device.map = NULL;
     869   [ +  -  +  -  :          6 :         sgl->keyed.key = 0xEEEE;
                   +  - ]
     870   [ +  -  +  - ]:          6 :         sgl->address = 0xFFFF;
     871   [ +  -  +  -  :          6 :         rdma_req.recv->buf = (void *)0xDDDD;
                   +  - ]
     872                 :            : 
     873                 :            :         /* Test 1: sgl type: keyed data block subtype: address */
     874   [ +  -  +  -  :          6 :         sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                   +  - ]
     875   [ +  -  +  -  :          6 :         sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                   +  - ]
     876                 :            : 
     877                 :            :         /* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
     878                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
     879                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     880                 :          6 :         dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
     881         [ +  - ]:          6 :         dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
     882         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
     883   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
     884                 :            :                           0, 0, 0, 0, 0, &dif_opts);
     885   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
     886   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = data_bs * 8;
     887   [ +  -  +  - ]:          6 :         rdma_req.req.qpair->transport = &rtransport.transport;
     888   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 4;
                   +  - ]
     889                 :            : 
     890                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     891                 :            : 
     892                 :          6 :         CU_ASSERT(rc == 0);
     893   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     894         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 4);
     895   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
     896   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
     897         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 1);
     898   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     899   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
     900   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
     901   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
     902   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     903                 :            : 
     904   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
     905   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     906   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     907                 :            : 
     908                 :            :         /* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
     909                 :            :                 block size 512 */
     910                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
     911                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     912         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
     913   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
     914                 :            :                           0, 0, 0, 0, 0, &dif_opts);
     915   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
     916   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = data_bs * 4;
     917   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 4;
                   +  - ]
     918                 :            : 
     919                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     920                 :            : 
     921                 :          6 :         CU_ASSERT(rc == 0);
     922   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     923         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 4);
     924   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
     925   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
     926         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 2);
     927   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     928   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 5);
                   +  - ]
     929   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
     930   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
     931   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     932                 :            : 
     933   [ +  +  +  - ]:         24 :         for (i = 0; i < 3; ++i) {
     934   [ +  -  +  -  :         18 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
          +  -  +  -  +  
                -  +  - ]
     935   [ +  -  +  -  :         18 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
          +  -  +  -  +  
                -  +  - ]
     936   [ +  -  +  -  :         18 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     937                 :          3 :         }
     938   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
          +  -  +  -  +  
                -  +  - ]
     939   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
          +  -  +  -  +  
                -  +  - ]
     940   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     941                 :            : 
     942                 :            :         /* 2nd buffer consumed */
     943   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
     944   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
          +  -  +  -  +  
                -  +  - ]
     945   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     946                 :            : 
     947                 :            :         /* Part 3: simple I/O, one SGL equal io unit size, io_unit_size is equal to block size 512 bytes */
     948                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
     949                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     950         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
     951   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
     952                 :            :                           0, 0, 0, 0, 0, &dif_opts);
     953   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
     954   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = data_bs;
     955   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs;
                   +  - ]
     956                 :            : 
     957                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     958                 :            : 
     959                 :          6 :         CU_ASSERT(rc == 0);
     960   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     961         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs);
     962   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
     963   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
     964         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 2);
     965   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     966   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
     967   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
     968   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
     969   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
     970                 :            : 
     971   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
     972   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
          +  -  +  -  +  
                -  +  - ]
     973   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
     974                 :            : 
     975         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 2);
     976   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
             +  -  +  - ]
     977   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
             +  -  +  - ]
     978                 :            :         /* 2nd buffer consumed for metadata */
     979   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
             +  -  +  - ]
     980   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
             +  -  +  - ]
     981                 :            : 
     982                 :            :         /* Part 4: simple I/O, one SGL equal io unit size, io_unit_size is aligned with md_size,
     983                 :            :            block size 512 */
     984                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
     985                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
     986         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
     987   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
     988                 :            :                           0, 0, 0, 0, 0, &dif_opts);
     989   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
     990   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
     991   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 4;
                   +  - ]
     992                 :            : 
     993                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
     994                 :            : 
     995                 :          6 :         CU_ASSERT(rc == 0);
     996   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
     997         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 4);
     998   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
     999   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
    1000         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 1);
    1001   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
    1002   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
    1003   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
    1004   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
    1005   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
    1006                 :            : 
    1007   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
    1008   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1009   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1010                 :            : 
    1011                 :            :         /* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
    1012                 :            :            block size 512 */
    1013                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
    1014                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
    1015         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
    1016   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
    1017                 :            :                           0, 0, 0, 0, 0, &dif_opts);
    1018   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
    1019   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
    1020   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 4;
                   +  - ]
    1021                 :            : 
    1022                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
    1023                 :            : 
    1024                 :          6 :         CU_ASSERT(rc == 0);
    1025   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
    1026         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 4);
    1027   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
    1028   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
    1029         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 2);
    1030   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
    1031   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 2);
                   +  - ]
    1032   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
    1033   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
    1034   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
    1035                 :            : 
    1036   [ +  +  +  - ]:         18 :         for (i = 0; i < 2; ++i) {
    1037   [ +  -  +  -  :         12 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
    1038   [ +  -  +  -  :         12 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs * 2);
          +  -  +  -  +  
                -  +  - ]
    1039                 :          2 :         }
    1040                 :            : 
    1041                 :            :         /* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
    1042                 :            :            block size 512 */
    1043                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
    1044                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
    1045         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
    1046   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
    1047                 :            :                           0, 0, 0, 0, 0, &dif_opts);
    1048   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
    1049   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = data_bs * 4;
    1050   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 6;
                   +  - ]
    1051                 :            : 
    1052                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
    1053                 :            : 
    1054                 :          6 :         CU_ASSERT(rc == 0);
    1055   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
    1056         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 6);
    1057   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
    1058   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
    1059         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 2);
    1060   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
    1061   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 7);
                   +  - ]
    1062   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
    1063   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
    1064   [ +  -  +  -  :          6 :         CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
             +  -  +  - ]
    1065                 :            : 
    1066   [ +  +  +  - ]:         24 :         for (i = 0; i < 3; ++i) {
    1067   [ +  -  +  -  :         18 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
          +  -  +  -  +  
                -  +  - ]
    1068   [ +  -  +  -  :         18 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
          +  -  +  -  +  
                -  +  - ]
    1069   [ +  -  +  -  :         18 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1070                 :          3 :         }
    1071   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
          +  -  +  -  +  
                -  +  - ]
    1072   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
          +  -  +  -  +  
                -  +  - ]
    1073   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1074                 :            : 
    1075                 :            :         /* 2nd IO buffer consumed */
    1076   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
    1077   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
          +  -  +  -  +  
                -  +  - ]
    1078   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1079                 :            : 
    1080   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
          +  -  +  -  +  
                -  +  - ]
    1081   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
          +  -  +  -  +  
                -  +  - ]
    1082   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1083                 :            : 
    1084   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
          +  -  +  -  +  
                -  +  - ]
    1085   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
          +  -  +  -  +  
                -  +  - ]
    1086   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1087                 :            : 
    1088                 :            :         /* Part 7: simple I/O, number of SGL entries exceeds the number of entries
    1089                 :            :            one WR can hold. Additional WR is chained */
    1090                 :          6 :         MOCK_SET(spdk_iobuf_get, data2_buffer);
    1091                 :          6 :         MOCK_SET(spdk_mempool_get, data2_buffer);
    1092                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
    1093         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
    1094   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
    1095                 :            :                           0, 0, 0, 0, 0, &dif_opts);
    1096   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
    1097   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = data_bs * 16;
    1098   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 16;
                   +  - ]
    1099                 :            : 
    1100                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
    1101                 :            : 
    1102                 :          6 :         CU_ASSERT(rc == 0);
    1103   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
    1104         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 16);
    1105         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 2);
    1106   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
    1107   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
    1108   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[0].iov_base == data2_buffer);
             +  -  +  - ]
    1109   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 16);
                   +  - ]
    1110   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
    1111   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
    1112                 :            : 
    1113   [ +  +  +  - ]:         96 :         for (i = 0; i < 15; ++i) {
    1114   [ +  -  +  -  :         90 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)data2_buffer + i * (data_bs + md_size));
          +  -  +  -  +  
                -  +  - ]
    1115   [ +  -  +  -  :         90 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
          +  -  +  -  +  
                -  +  - ]
    1116   [ +  -  +  -  :         90 :                 CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1117                 :         15 :         }
    1118                 :            : 
    1119                 :            :         /* 8192 - (512 + 8) * 15 = 392 */
    1120   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)data2_buffer + i * (data_bs + md_size));
          +  -  +  -  +  
                -  +  - ]
    1121   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
          +  -  +  -  +  
                -  +  - ]
    1122   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1123                 :            : 
    1124                 :            :         /* additional wr from pool */
    1125   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
             +  -  +  - ]
    1126   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
          +  -  +  -  +  
                      - ]
    1127   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
          +  -  +  -  +  
                -  +  - ]
    1128                 :            :         /* 2nd IO buffer */
    1129   [ +  -  +  -  :          6 :         CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)data2_buffer);
          +  -  +  -  +  
                -  +  - ]
    1130   [ +  -  +  -  :          6 :         CU_ASSERT(data2->wr.sg_list[0].length == 120);
          +  -  +  -  +  
                -  +  - ]
    1131   [ +  -  +  -  :          6 :         CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1132                 :            : 
    1133                 :            :         /* Part 8: simple I/O, data with metadata do not fit to 1 io_buffer */
    1134                 :          6 :         MOCK_SET(spdk_iobuf_get, (void *)0x2000);
    1135                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
    1136         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
    1137   [ +  -  +  - ]:          1 :                           SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
    1138                 :            :                           0, 0, 0, 0, 0, &dif_opts);
    1139   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
    1140   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = 516;
    1141   [ +  -  +  -  :          6 :         sgl->keyed.length = data_bs * 2;
                   +  - ]
    1142                 :            : 
    1143                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
    1144                 :            : 
    1145                 :          6 :         CU_ASSERT(rc == 0);
    1146   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
    1147         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 2);
    1148         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.iovcnt == 3);
    1149   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
    1150   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
    1151   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)0x2000);
             +  -  +  - ]
    1152   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 2);
                   +  - ]
    1153   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
          +  -  +  -  +  
                      - ]
    1154   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
          +  -  +  -  +  
                      - ]
    1155                 :            : 
    1156   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
          +  -  +  -  +  
                -  +  - ]
    1157   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
          +  -  +  -  +  
                -  +  - ]
    1158   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1159                 :            : 
    1160                 :            :         /* 2nd IO buffer consumed, offset 4 bytes due to part of the metadata
    1161                 :            :           is located at the beginning of that buffer */
    1162   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
          +  -  +  -  +  
                -  +  - ]
    1163   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
          +  -  +  -  +  
                -  +  - ]
    1164   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                -  +  - ]
    1165                 :            : 
    1166                 :            :         /* Test 2: Multi SGL */
    1167   [ +  -  +  -  :          6 :         sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
                   +  - ]
    1168   [ +  -  +  -  :          6 :         sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
                   +  - ]
    1169   [ +  -  +  - ]:          6 :         sgl->address = 0;
    1170   [ +  -  +  -  :          6 :         rdma_req.recv->buf = (void *)&sgl_desc;
                   +  - ]
    1171                 :          6 :         MOCK_SET(spdk_mempool_get, data_buffer);
    1172                 :          6 :         MOCK_SET(spdk_iobuf_get, data_buffer);
    1173                 :            : 
    1174                 :            :         /* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
    1175                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
    1176         [ +  - ]:          7 :         spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
    1177                 :            :                           SPDK_DIF_TYPE1,
    1178   [ +  -  +  - ]:          1 :                           SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
    1179                 :            :                           0, 0, 0, 0, 0, &dif_opts);
    1180   [ +  -  +  - ]:          6 :         rdma_req.req.dif_enabled = true;
    1181   [ +  -  +  - ]:          6 :         rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
    1182   [ +  -  +  -  :          6 :         sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
             +  -  +  - ]
    1183                 :            : 
    1184   [ +  +  +  - ]:         18 :         for (i = 0; i < 2; i++) {
    1185   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
          +  -  +  -  +  
                      - ]
    1186   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
          +  -  +  -  +  
                      - ]
    1187   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.length = data_bs * 4;
          +  -  +  -  +  
                      - ]
    1188   [ +  -  +  -  :         12 :                 sgl_desc[i].address = 0x4000 + i * data_bs * 4;
             +  -  +  - ]
    1189   [ +  -  +  -  :         12 :                 sgl_desc[i].keyed.key = 0x44;
          +  -  +  -  +  
                      - ]
    1190                 :          2 :         }
    1191                 :            : 
    1192                 :          6 :         rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
    1193                 :            : 
    1194                 :          6 :         CU_ASSERT(rc == 0);
    1195   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.data_from_pool == true);
    1196         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
    1197   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
                   +  - ]
    1198   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
    1199   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 1);
                   +  - ]
    1200   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (uintptr_t)(data_buffer));
          +  -  +  -  +  
                -  +  - ]
    1201   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs * 4);
          +  -  +  -  +  
                -  +  - ]
    1202                 :            : 
    1203   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
          +  -  +  -  +  
                      - ]
    1204   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
          +  -  +  -  +  
                      - ]
    1205   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.next == &data->wr);
             +  -  +  - ]
    1206   [ +  -  +  -  :          6 :         CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
          +  -  +  -  +  
                      - ]
    1207   [ +  -  +  -  :          6 :         CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
          +  -  +  -  +  
                      - ]
    1208   [ +  -  +  -  :          6 :         CU_ASSERT(data->wr.num_sge == 1);
                   +  - ]
    1209   [ +  -  +  -  :          6 :         CU_ASSERT(data->wr.sg_list[0].addr == (uintptr_t)(data_buffer));
          +  -  +  -  +  
                -  +  - ]
    1210   [ +  -  +  -  :          6 :         CU_ASSERT(data->wr.sg_list[0].length == data_bs * 4);
          +  -  +  -  +  
                -  +  - ]
    1211                 :            : 
    1212   [ +  -  +  -  :          6 :         CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
             +  -  +  - ]
    1213                 :          6 :         reset_nvmf_rdma_request(&rdma_req);
    1214                 :          6 : }
    1215                 :            : 
                                :            : /* Unit test: nvmf_rdma_opts_init() must populate a zero-initialized
                                :            :  * spdk_nvmf_transport_opts with the RDMA transport's compile-time
                                :            :  * defaults (SPDK_NVMF_RDMA_DEFAULT_* / SPDK_NVMF_RDMA_MIN_* macros)
                                :            :  * and leave transport_specific unset. */
     1216                 :            : static void
     1217                 :          6 : test_nvmf_rdma_opts_init(void)
     1218                 :            : {
     1219                 :          6 :         struct spdk_nvmf_transport_opts opts = {};
     1220                 :            : 
     1221                 :          6 :         nvmf_rdma_opts_init(&opts);
     1222                 :          6 :         CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
     1223                 :          6 :         CU_ASSERT(opts.max_qpairs_per_ctrlr ==  SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
     1224                 :          6 :         CU_ASSERT(opts.in_capsule_data_size ==  SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
     1225                 :          6 :         CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
     1226                 :          6 :         CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
     1227                 :          6 :         CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
     1228                 :          6 :         CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
     1229                 :          6 :         CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
                                :            :         /* NOTE(review): the [ + + ] branch data here (both outcomes taken) suggests
                                :            :  * SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP differs across test configurations —
                                :            :  * TODO confirm against the macro's definition. */
     1230         [ +  + ]:          6 :         CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
     1231                 :          6 :         CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
     1232                 :          6 :         CU_ASSERT(opts.transport_specific == NULL);
     1233                 :          6 : }
    1234                 :            : 
                                :            : /* Unit test: nvmf_rdma_request_free_data() must return every chained
                                :            :  * spdk_nvmf_rdma_request_data element back to the transport's data_wr_pool
                                :            :  * and reset the request's own data WR.  Verified by watching the mock
                                :            :  * mempool's element count go back to its full capacity. */
     1235                 :            : static void
     1236                 :          6 : test_nvmf_rdma_request_free_data(void)
     1237                 :            : {
     1238                 :          6 :         struct spdk_nvmf_rdma_request rdma_req = {};
     1239                 :          6 :         struct spdk_nvmf_rdma_transport rtransport = {};
     1240                 :          6 :         struct spdk_nvmf_rdma_request_data *next_request_data = NULL;
     1241                 :            : 
                                :            :         /* Use a real (test) mempool rather than the mocked getter so that
                                :            :  * element counts can be observed below. */
     1242                 :          6 :         MOCK_CLEAR(spdk_mempool_get);
     1243         [ +  - ]:          6 :         rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
     1244                 :            :                                   SPDK_NVMF_MAX_SGL_ENTRIES,
     1245                 :            :                                   sizeof(struct spdk_nvmf_rdma_request_data),
     1246                 :            :                                   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
     1247                 :            :                                   SPDK_ENV_SOCKET_ID_ANY);
                                :            :         /* Take one element; pool count must drop by exactly one. */
     1248         [ +  - ]:          6 :         next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
     1249   [ +  +  +  -  :          6 :         SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
           +  -  +  -  #  
                       # ]
     1250                 :            :                              SPDK_NVMF_MAX_SGL_ENTRIES - 1);
                                :            :         /* Chain the pooled WR behind the request's built-in data WR so the
                                :            :  * free path has one extra element to release. */
     1251   [ +  -  +  -  :          6 :         next_request_data->wr.wr_id = (uint64_t)&rdma_req.data_wr;
                    +  - ]
     1252   [ +  -  +  -  :          6 :         next_request_data->wr.num_sge = 2;
                    +  - ]
     1253   [ +  -  +  -  :          6 :         next_request_data->wr.next = NULL;
                    +  - ]
     1254   [ +  -  +  -  :          6 :         rdma_req.data.wr.next = &next_request_data->wr;
              +  -  +  - ]
     1255   [ +  -  +  -  :          6 :         rdma_req.data.wr.wr_id = (uint64_t)&rdma_req.data_wr;
                    +  - ]
     1256   [ +  -  +  -  :          6 :         rdma_req.data.wr.num_sge = 2;
                    +  - ]
     1257   [ +  -  +  - ]:          6 :         rdma_req.transfer_wr = &rdma_req.data.wr;
     1258                 :            : 
     1259                 :          6 :         nvmf_rdma_request_free_data(&rdma_req, &rtransport);
     1260                 :            :         /* Check if next_request_data put into memory pool */
     1261   [ +  -  +  -  :          6 :         CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
                    +  - ]
                                :            :         /* The request's own (non-pooled) WR is only reset, not freed. */
     1262   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.data.wr.num_sge == 0);
                    +  - ]
     1263                 :            : 
     1264         [ +  - ]:          6 :         spdk_mempool_free(rtransport.data_wr_pool);
     1265                 :          6 : }
    1266                 :            : 
                                :            : /* Unit test: nvmf_rdma_update_ibv_state() in three scenarios:
                                :            :  *  1) query fails (no underlying ibv_qp)        -> error sentinel returned
                                :            :  *  2) query succeeds but reports IBV_QPS_ERR    -> error sentinel returned
                                :            :  *  3) query succeeds with a good state          -> state cached and returned
                                :            :  * IBV_QPS_ERR + 1 is used by the implementation as its "failed/bad state"
                                :            :  * return value (one past the last valid ibv_qp_state). */
     1267                 :            : static void
     1268                 :          6 : test_nvmf_rdma_update_ibv_state(void)
     1269                 :            : {
     1270                 :          6 :         struct spdk_nvmf_rdma_qpair rqpair = {};
     1271                 :          6 :         struct spdk_rdma_qp rdma_qp = {};
     1272                 :          6 :         struct ibv_qp qp = {};
     1273                 :          6 :         int rc = 0;
     1274                 :            : 
     1275         [ +  - ]:          6 :         rqpair.rdma_qp = &rdma_qp;
     1276                 :            : 
     1277                 :            :         /* Case 1: Failed to get updated RDMA queue pair state */
     1278         [ +  - ]:          6 :         rqpair.ibv_state = IBV_QPS_INIT;
                                :            :         /* NULL qp forces the state query inside the function to fail. */
     1279   [ +  -  +  -  :          6 :         rqpair.rdma_qp->qp = NULL;
                    +  - ]
     1280                 :            : 
     1281                 :          6 :         rc = nvmf_rdma_update_ibv_state(&rqpair);
     1282                 :          6 :         CU_ASSERT(rc == IBV_QPS_ERR + 1);
     1283                 :            : 
     1284                 :            :         /* Case 2: Bad state updated */
     1285   [ +  -  +  -  :          6 :         rqpair.rdma_qp->qp = &qp;
                    +  - ]
     1286         [ +  - ]:          6 :         qp.state = IBV_QPS_ERR;
     1287                 :          6 :         rc = nvmf_rdma_update_ibv_state(&rqpair);
                                :            :         /* NOTE(review): 10 is presumably the "unknown/invalid" value the
                                :            :  * implementation stores for a bad state — TODO confirm against
                                :            :  * nvmf_rdma_update_ibv_state() in rdma.c; a named constant would be
                                :            :  * clearer than the magic number here. */
     1288         [ +  - ]:          6 :         CU_ASSERT(rqpair.ibv_state == 10);
     1289                 :          6 :         CU_ASSERT(rc == IBV_QPS_ERR + 1);
     1290                 :            : 
     1291                 :            :         /* Case 3: Pass */
     1292         [ +  - ]:          6 :         qp.state = IBV_QPS_INIT;
     1293                 :          6 :         rc = nvmf_rdma_update_ibv_state(&rqpair);
                                :            :         /* On success the queried state is both cached on the qpair and
                                :            :  * returned to the caller. */
     1294         [ +  - ]:          6 :         CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
     1295                 :          6 :         CU_ASSERT(rc == IBV_QPS_INIT);
     1296                 :          6 : }
    1297                 :            : 
    1298                 :            : static void
    1299                 :          6 : test_nvmf_rdma_resources_create(void)
    1300                 :            : {
    1301                 :            :         static struct spdk_nvmf_rdma_resources *rdma_resource;
    1302                 :          6 :         struct spdk_nvmf_rdma_resource_opts opts = {};
    1303                 :          6 :         struct spdk_nvmf_rdma_qpair qpair = {};
    1304                 :          6 :         struct spdk_nvmf_rdma_recv *recv = NULL;
    1305                 :          6 :         struct spdk_nvmf_rdma_request *req = NULL;
    1306                 :          6 :         const int DEPTH = 128;
    1307                 :            : 
    1308         [ +  - ]:          6 :         opts.max_queue_depth = DEPTH;
    1309         [ +  - ]:          6 :         opts.in_capsule_data_size = 4096;
    1310         [ +  - ]:          6 :         opts.shared = true;
    1311                 :          6 :         opts.qpair = &qpair;
    1312                 :            : 
    1313                 :          6 :         rdma_resource = nvmf_rdma_resources_create(&opts);
    1314                 :          6 :         CU_ASSERT(rdma_resource != NULL);
    1315                 :            :         /* Just check first and last entry */
    1316   [ +  -  +  -  :          6 :         recv = &rdma_resource->recvs[0];
                   +  - ]
    1317   [ +  -  +  -  :          6 :         req = &rdma_resource->reqs[0];
                   +  - ]
    1318   [ +  -  +  -  :          6 :         CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
                   +  - ]
    1319   [ +  -  +  -  :          6 :         CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs));
             +  -  +  - ]
    1320   [ +  -  +  -  :          6 :         CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[0]);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
    1321   [ +  -  +  -  :          6 :         CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[0]));
          +  -  +  -  +  
                      - ]
    1322   [ +  -  +  -  :          6 :         CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                      - ]
    1323   [ +  -  +  -  :          6 :         CU_ASSERT(recv->wr.num_sge == 2);
                   +  - ]
    1324   [ +  -  +  -  :          6 :         CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[0].rdma_wr);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1325   [ +  -  +  -  :          6 :         CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[0].sgl);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1326   [ +  -  +  -  :          6 :         CU_ASSERT(req->req.rsp == &rdma_resource->cpls[0]);
          +  -  +  -  +  
                -  +  - ]
    1327   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[0]);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
    1328   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[0]));
          +  -  +  -  +  
                      - ]
    1329   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                      - ]
    1330   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp_wr.type == RDMA_WR_TYPE_SEND);
                   +  - ]
    1331   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].rsp_wr);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
    1332   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.next == NULL);
             +  -  +  - ]
    1333   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
             +  -  +  - ]
    1334   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
             +  -  +  - ]
    1335   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[0].rsp.sgl);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
    1336   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
             +  -  +  - ]
    1337   [ +  -  +  -  :          6 :         CU_ASSERT(req->data_wr.type == RDMA_WR_TYPE_DATA);
                   +  - ]
    1338   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].data_wr);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
    1339   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.next == NULL);
             +  -  +  - ]
    1340   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
             +  -  +  - ]
    1341   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[0].data.sgl);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
    1342   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
             +  -  +  - ]
    1343   [ +  -  +  - ]:          6 :         CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
    1344                 :            : 
    1345   [ +  -  +  -  :          6 :         recv = &rdma_resource->recvs[DEPTH - 1];
                   +  - ]
    1346   [ +  -  +  -  :          6 :         req = &rdma_resource->reqs[DEPTH - 1];
                   +  - ]
    1347   [ +  -  +  -  :          6 :         CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
                   +  - ]
    1348   [ +  -  +  -  :          6 :         CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs +
             +  -  +  - ]
    1349                 :            :                         (DEPTH - 1) * 4096));
    1350   [ +  -  +  -  :          6 :         CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[DEPTH - 1]);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
    1351   [ +  -  +  -  :          6 :         CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[DEPTH - 1]));
          +  -  +  -  +  
                      - ]
    1352   [ +  -  +  -  :          6 :         CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                      - ]
    1353   [ +  -  +  -  :          6 :         CU_ASSERT(recv->wr.num_sge == 2);
                   +  - ]
    1354   [ +  -  +  -  :          6 :         CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[DEPTH - 1].rdma_wr);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1355   [ +  -  +  -  :          6 :         CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[DEPTH - 1].sgl);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1356   [ +  -  +  -  :          6 :         CU_ASSERT(req->req.rsp == &rdma_resource->cpls[DEPTH - 1]);
          +  -  +  -  +  
                -  +  - ]
    1357   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[DEPTH - 1]);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
    1358   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[DEPTH - 1]));
          +  -  +  -  +  
                      - ]
    1359   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
          +  -  +  -  +  
                      - ]
    1360   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp_wr.type == RDMA_WR_TYPE_SEND);
                   +  - ]
    1361   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&req->rsp_wr);
          +  -  +  -  +  
                      - ]
    1362   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.next == NULL);
             +  -  +  - ]
    1363   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
             +  -  +  - ]
    1364   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
             +  -  +  - ]
    1365   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[DEPTH - 1].rsp.sgl);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
    1366   [ +  -  +  -  :          6 :         CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
             +  -  +  - ]
    1367   [ +  -  +  -  :          6 :         CU_ASSERT(req->data_wr.type == RDMA_WR_TYPE_DATA);
                   +  - ]
    1368   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&req->data_wr);
          +  -  +  -  +  
                      - ]
    1369   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.next == NULL);
             +  -  +  - ]
    1370   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
             +  -  +  - ]
    1371   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[DEPTH - 1].data.sgl);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
    1372   [ +  -  +  -  :          6 :         CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
             +  -  +  - ]
    1373   [ +  -  +  - ]:          6 :         CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
    1374                 :            : 
    1375                 :          6 :         nvmf_rdma_resources_destroy(rdma_resource);
    1376                 :          6 : }
    1377                 :            : 
    1378                 :            : static void
    1379                 :          6 : test_nvmf_rdma_qpair_compare(void)
    1380                 :            : {
    1381                 :          6 :         struct spdk_nvmf_rdma_qpair rqpair1 = {}, rqpair2 = {};
    1382                 :            : 
    1383         [ +  - ]:          6 :         rqpair1.qp_num = 0;
    1384         [ +  - ]:          6 :         rqpair2.qp_num = UINT32_MAX;
    1385                 :            : 
    1386                 :          6 :         CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair1, &rqpair2) < 0);
    1387                 :          6 :         CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair2, &rqpair1) > 0);
    1388                 :          6 : }
    1389                 :            : 
    1390                 :            : static void
    1391                 :          6 : test_nvmf_rdma_resize_cq(void)
    1392                 :            : {
    1393                 :          6 :         int rc = -1;
    1394                 :          6 :         int tnum_wr = 0;
    1395                 :          6 :         int tnum_cqe = 0;
    1396                 :          6 :         struct spdk_nvmf_rdma_qpair rqpair = {};
    1397                 :          6 :         struct spdk_nvmf_rdma_poller rpoller = {};
    1398                 :          6 :         struct spdk_nvmf_rdma_device rdevice = {};
    1399                 :          6 :         struct ibv_context ircontext = {};
    1400                 :          6 :         struct ibv_device idevice = {};
    1401                 :            : 
    1402         [ +  - ]:          6 :         rdevice.context = &ircontext;
    1403         [ +  - ]:          6 :         rqpair.poller = &rpoller;
    1404                 :          6 :         ircontext.device = &idevice;
    1405                 :            : 
    1406                 :            :         /* Test1: Current capacity support required size. */
    1407         [ +  - ]:          6 :         rpoller.required_num_wr = 10;
    1408         [ +  - ]:          6 :         rpoller.num_cqe = 20;
    1409         [ +  - ]:          6 :         rqpair.max_queue_depth = 2;
    1410         [ +  - ]:          6 :         tnum_wr = rpoller.required_num_wr;
    1411         [ +  - ]:          6 :         tnum_cqe = rpoller.num_cqe;
    1412                 :            : 
    1413                 :          6 :         rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
    1414                 :          6 :         CU_ASSERT(rc == 0);
    1415   [ +  -  +  -  :          6 :         CU_ASSERT(rpoller.required_num_wr == 10 + MAX_WR_PER_QP(rqpair.max_queue_depth));
          +  -  +  -  +  
                      - ]
    1416         [ +  - ]:          6 :         CU_ASSERT(rpoller.required_num_wr > tnum_wr);
    1417         [ +  - ]:          6 :         CU_ASSERT(rpoller.num_cqe == tnum_cqe);
    1418                 :            : 
    1419                 :            :         /* Test2: iWARP doesn't support CQ resize. */
    1420         [ +  - ]:          6 :         tnum_wr = rpoller.required_num_wr;
    1421         [ +  - ]:          6 :         tnum_cqe = rpoller.num_cqe;
    1422         [ +  - ]:          6 :         idevice.transport_type = IBV_TRANSPORT_IWARP;
    1423                 :            : 
    1424                 :          6 :         rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
    1425                 :          6 :         CU_ASSERT(rc == -1);
    1426         [ +  - ]:          6 :         CU_ASSERT(rpoller.required_num_wr == tnum_wr);
    1427         [ +  - ]:          6 :         CU_ASSERT(rpoller.num_cqe == tnum_cqe);
    1428                 :            : 
    1429                 :            : 
    1430                 :            :         /* Test3: RDMA CQE requirement exceeds device max_cqe limitation. */
    1431         [ +  - ]:          6 :         tnum_wr = rpoller.required_num_wr;
    1432         [ +  - ]:          6 :         tnum_cqe = rpoller.num_cqe;
    1433         [ +  - ]:          6 :         idevice.transport_type = IBV_TRANSPORT_UNKNOWN;
    1434         [ +  - ]:          6 :         rdevice.attr.max_cqe = 3;
    1435                 :            : 
    1436                 :          6 :         rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
    1437                 :          6 :         CU_ASSERT(rc == -1);
    1438         [ +  - ]:          6 :         CU_ASSERT(rpoller.required_num_wr == tnum_wr);
    1439         [ +  - ]:          6 :         CU_ASSERT(rpoller.num_cqe == tnum_cqe);
    1440                 :            : 
    1441                 :            :         /* Test4: RDMA CQ resize failed. */
    1442         [ +  - ]:          6 :         tnum_wr = rpoller.required_num_wr;
    1443         [ +  - ]:          6 :         tnum_cqe = rpoller.num_cqe;
    1444         [ +  - ]:          6 :         idevice.transport_type = IBV_TRANSPORT_IB;
    1445         [ +  - ]:          6 :         rdevice.attr.max_cqe = 30;
    1446                 :          6 :         MOCK_SET(ibv_resize_cq, -1);
    1447                 :            : 
    1448                 :          6 :         rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
    1449                 :          6 :         CU_ASSERT(rc == -1);
    1450         [ +  - ]:          6 :         CU_ASSERT(rpoller.required_num_wr == tnum_wr);
    1451         [ +  - ]:          6 :         CU_ASSERT(rpoller.num_cqe == tnum_cqe);
    1452                 :            : 
    1453                 :            :         /* Test5: RDMA CQ resize success. rsize = MIN(MAX(num_cqe * 2, required_num_wr), device->attr.max_cqe). */
    1454         [ +  - ]:          6 :         tnum_wr = rpoller.required_num_wr;
    1455         [ +  - ]:          6 :         tnum_cqe = rpoller.num_cqe;
    1456                 :          6 :         MOCK_SET(ibv_resize_cq, 0);
    1457                 :            : 
    1458                 :          6 :         rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
    1459                 :          6 :         CU_ASSERT(rc == 0);
    1460         [ +  - ]:          6 :         CU_ASSERT(rpoller.num_cqe = 30);
    1461   [ +  -  +  -  :          6 :         CU_ASSERT(rpoller.required_num_wr == 18 + MAX_WR_PER_QP(rqpair.max_queue_depth));
          +  -  +  -  +  
                      - ]
    1462         [ +  - ]:          6 :         CU_ASSERT(rpoller.required_num_wr > tnum_wr);
    1463         [ +  - ]:          6 :         CU_ASSERT(rpoller.num_cqe > tnum_cqe);
    1464                 :          6 : }
    1465                 :            : 
    1466                 :            : int
    1467                 :          6 : main(int argc, char **argv)
    1468                 :            : {
    1469                 :          6 :         CU_pSuite       suite = NULL;
    1470                 :          1 :         unsigned int    num_failures;
    1471                 :            : 
    1472                 :          6 :         CU_initialize_registry();
    1473                 :            : 
    1474                 :          6 :         suite = CU_add_suite("nvmf", NULL, NULL);
    1475                 :            : 
    1476                 :          6 :         CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
    1477                 :          6 :         CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
    1478                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
    1479                 :          6 :         CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
    1480                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
    1481                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
    1482                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);
    1483                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_resources_create);
    1484                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_qpair_compare);
    1485                 :          6 :         CU_ADD_TEST(suite, test_nvmf_rdma_resize_cq);
    1486                 :            : 
    1487                 :          6 :         num_failures = spdk_ut_run_tests(argc, argv, NULL);
    1488                 :          6 :         CU_cleanup_registry();
    1489                 :          7 :         return num_failures;
    1490                 :          1 : }

Generated by: LCOV version 1.14