LCOV - code coverage report
Current view: top level - spdk/test/unit/lib/nvme/nvme_rdma.c - nvme_rdma_ut.c (source / functions) Hit Total Coverage
Test: Combined Lines: 988 1010 97.8 %
Date: 2024-11-20 02:27:51 Functions: 39 53 73.6 %
Legend: Lines: hit not hit | Branches: + taken - not taken # not executed Branches: 1572 3114 50.5 %

           Branch data     Line data    Source code
       1                 :            : /*   SPDX-License-Identifier: BSD-3-Clause
       2                 :            :  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
       3                 :            :  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       4                 :            :  */
       5                 :            : 
       6                 :            : #include "spdk/stdinc.h"
       7                 :            : #include "spdk_internal/cunit.h"
       8                 :            : #include "nvme/nvme_rdma.c"
       9                 :            : #include "common/lib/nvme/common_stubs.h"
      10                 :            : #include "common/lib/test_rdma.c"
      11                 :            : 
SPDK_LOG_REGISTER_COMPONENT(nvme)

/* Memory-map stubs: address translation is not exercised by these tests. */
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);

DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

/* Poll-group / qpair plumbing stubbed out to no-ops. */
DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);

/* librdmacm / libc entry points: succeed without doing anything. */
DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
DEFINE_STUB(fcntl, int, (int fd, int cmd, ...), 0);
DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));

/* libibverbs entry points not covered by the hand-written mocks below. */
DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);

/* Memory-domain API: report an RDMA device type, otherwise inert. */
DEFINE_STUB(spdk_memory_domain_get_context, struct spdk_memory_domain_ctx *,
	    (struct spdk_memory_domain *device), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
DEFINE_STUB(spdk_memory_domain_pull_data, int, (struct spdk_memory_domain *src_domain,
		void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
		uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);

/* Debug print helpers: silenced. */
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));

DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));
      51                 :            : 
      52                 :            : DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
      53                 :            : int
      54                 :         24 : spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
      55                 :            :                           struct spdk_memory_domain_ctx *ctx, const char *id)
      56                 :            : {
      57                 :            :         static struct spdk_memory_domain *__dma_dev = (struct spdk_memory_domain *)0xdeaddead;
      58                 :            : 
      59   [ +  +  +  + ]:         24 :         HANDLE_RETURN_MOCK(spdk_memory_domain_create);
      60                 :            : 
      61         [ +  - ]:         18 :         *domain = __dma_dev;
      62                 :            : 
      63                 :         18 :         return 0;
      64                 :          4 : }
      65                 :            : 
      66                 :            : static struct spdk_memory_domain_translation_result g_memory_translation_translation = {.size = sizeof(struct spdk_memory_domain_translation_result) };
      67                 :            : 
      68                 :            : DEFINE_RETURN_MOCK(spdk_memory_domain_translate_data, int);
      69                 :            : int
      70                 :         12 : spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
      71                 :            :                                   struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
      72                 :            :                                   void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
      73                 :            : {
      74                 :            : 
      75   [ +  +  +  + ]:         12 :         HANDLE_RETURN_MOCK(spdk_memory_domain_translate_data);
      76                 :            : 
      77   [ +  +  +  + ]:          6 :         memcpy(result, &g_memory_translation_translation, sizeof(g_memory_translation_translation));
      78                 :            : 
      79                 :          6 :         return 0;
      80                 :          2 : }
      81                 :            : 
      82                 :            : /* ibv_reg_mr can be a macro, need to undefine it */
      83                 :            : #ifdef ibv_reg_mr
      84                 :            : #undef ibv_reg_mr
      85                 :            : #endif
      86                 :            : 
      87                 :            : DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
      88                 :            : struct ibv_mr *
      89                 :          0 : ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
      90                 :            : {
      91   [ #  #  #  # ]:          0 :         HANDLE_RETURN_MOCK(ibv_reg_mr);
      92         [ #  # ]:          0 :         if (length > 0) {
      93                 :          0 :                 return &g_rdma_mr;
      94                 :            :         } else {
      95                 :          0 :                 return NULL;
      96                 :            :         }
      97                 :          0 : }
      98                 :            : 
/* Minimal stand-in for a bdev I/O: the iovec list walked by the
 * nvme_rdma_ut_reset_sgl/nvme_rdma_ut_next_sge callbacks below. */
struct nvme_rdma_ut_bdev_io {
	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];	/* scatter-gather entries */
	int iovpos;	/* cursor: index of the next iov to hand out */
	int iovcnt;	/* number of valid entries in iovs */
};
     104                 :            : 
     105                 :            : DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
     106                 :            : struct ibv_context **
     107                 :          1 : rdma_get_devices(int *num_devices)
     108                 :            : {
     109                 :            :         static struct ibv_context *_contexts[] = {
     110                 :            :                 (struct ibv_context *)0xDEADBEEF,
     111                 :            :                 (struct ibv_context *)0xFEEDBEEF,
     112                 :            :                 NULL
     113                 :            :         };
     114                 :            : 
     115   [ +  +  -  + ]:          6 :         HANDLE_RETURN_MOCK(rdma_get_devices);
     116                 :          6 :         return _contexts;
     117                 :          1 : }
     118                 :            : 
     119                 :            : DEFINE_RETURN_MOCK(rdma_create_event_channel, struct rdma_event_channel *);
     120                 :            : struct rdma_event_channel *
     121                 :          1 : rdma_create_event_channel(void)
     122                 :            : {
     123   [ +  +  +  - ]:          6 :         HANDLE_RETURN_MOCK(rdma_create_event_channel);
     124                 :          0 :         return NULL;
     125                 :          1 : }
     126                 :            : 
     127                 :            : DEFINE_RETURN_MOCK(ibv_query_device, int);
     128                 :            : int
     129                 :          3 : ibv_query_device(struct ibv_context *context,
     130                 :            :                  struct ibv_device_attr *device_attr)
     131                 :            : {
     132         [ +  - ]:         18 :         if (device_attr) {
     133   [ +  -  +  - ]:         18 :                 device_attr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
     134                 :          3 :         }
     135   [ +  +  -  + ]:         18 :         HANDLE_RETURN_MOCK(ibv_query_device);
     136                 :            : 
     137                 :         18 :         return 0;
     138                 :          3 : }
     139                 :            : 
     140                 :            : /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
     141                 :            : static void
     142                 :         60 : nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
     143                 :            : {
     144                 :         60 :         struct nvme_rdma_ut_bdev_io *bio = cb_arg;
     145                 :         10 :         struct iovec *iov;
     146                 :            : 
     147   [ +  -  +  -  :         60 :         for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
          +  -  +  -  -  
             +  #  #  #  
                      # ]
     148   [ +  -  +  -  :         60 :                 iov = &bio->iovs[bio->iovpos];
          +  -  +  -  +  
                      - ]
     149                 :            :                 /* Only provide offsets at the beginning of an iov */
     150         [ +  - ]:         60 :                 if (offset == 0) {
     151                 :         60 :                         break;
     152                 :            :                 }
     153                 :            : 
     154   [ #  #  #  # ]:          0 :                 offset -= iov->iov_len;
     155                 :          0 :         }
     156                 :            : 
     157   [ +  +  +  -  :         60 :         SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
             +  -  #  # ]
     158                 :         60 : }
     159                 :            : 
     160                 :            : static int
     161                 :        126 : nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
     162                 :            : {
     163                 :        126 :         struct nvme_rdma_ut_bdev_io *bio = cb_arg;
     164                 :         21 :         struct iovec *iov;
     165                 :            : 
     166   [ +  +  +  -  :        126 :         SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
             +  -  #  # ]
     167                 :            : 
     168   [ +  +  +  -  :        126 :         if (bio->iovpos == bio->iovcnt) {
          +  -  +  -  +  
                      + ]
     169                 :          6 :                 return -1;
     170                 :            :         }
     171                 :            : 
     172   [ +  -  +  -  :        120 :         iov = &bio->iovs[bio->iovpos];
          +  -  +  -  +  
                      - ]
     173                 :            : 
     174   [ +  -  +  -  :        120 :         *address = iov->iov_base;
                   +  - ]
     175   [ +  -  +  -  :        120 :         *length = iov->iov_len;
                   +  - ]
     176   [ +  -  +  - ]:        120 :         bio->iovpos++;
     177                 :            : 
     178                 :        120 :         return 0;
     179                 :         21 : }
     180                 :            : 
     181                 :            : static void
     182                 :          6 : test_nvme_rdma_build_sgl_request(void)
     183                 :            : {
     184                 :          5 :         struct nvme_rdma_qpair rqpair;
     185                 :          6 :         struct spdk_nvme_ctrlr ctrlr = {0};
     186                 :          6 :         struct spdk_nvmf_cmd cmd = {{0}};
     187                 :          6 :         struct spdk_nvme_rdma_req rdma_req = {0};
     188                 :          6 :         struct nvme_request req = {{0}};
     189                 :          6 :         struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
     190                 :          1 :         uint64_t i;
     191                 :          1 :         int rc;
     192                 :            : 
     193         [ +  - ]:          6 :         ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
     194   [ +  -  +  -  :          6 :         ctrlr.cdata.nvmf_specific.msdbd = 16;
                   +  - ]
     195         [ +  - ]:          6 :         ctrlr.ioccsz_bytes = 4096;
     196                 :            : 
     197         [ +  - ]:          6 :         rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
     198         [ +  - ]:          6 :         rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
     199                 :          6 :         rqpair.qpair.ctrlr = &ctrlr;
     200         [ +  - ]:          6 :         rqpair.cmds = &cmd;
     201   [ +  -  +  -  :          6 :         cmd.sgl[0].address = 0x1111;
                   +  - ]
     202                 :          6 :         rdma_req.id = 0;
     203         [ +  - ]:          6 :         rdma_req.req = &req;
     204                 :            : 
     205                 :          6 :         req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
     206         [ +  - ]:          6 :         req.qpair = &rqpair.qpair;
     207                 :            : 
     208         [ +  + ]:        102 :         for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
     209   [ +  -  +  -  :         96 :                 bio.iovs[i].iov_base = (void *)i + 1;
             +  -  +  - ]
     210   [ +  -  +  -  :         96 :                 bio.iovs[i].iov_len = 0;
             +  -  +  - ]
     211                 :         16 :         }
     212                 :            : 
     213                 :            :         /* Test case 1: single SGL. Expected: PASS */
     214         [ +  - ]:          6 :         bio.iovpos = 0;
     215         [ +  - ]:          6 :         req.payload_offset = 0;
     216         [ +  - ]:          6 :         req.payload_size = 0x1000;
     217   [ +  -  +  -  :          6 :         bio.iovs[0].iov_len = 0x1000;
                   +  - ]
     218                 :          6 :         rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
     219   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc == 0);
     220         [ +  - ]:          6 :         CU_ASSERT(bio.iovpos == 1);
     221   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
             +  -  +  - ]
     222   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
             +  -  +  - ]
     223   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
          +  -  +  -  +  
                      - ]
     224   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
             +  -  +  - ]
     225   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
          +  -  +  -  +  
                      - ]
     226   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     227                 :            : 
     228                 :            :         /* Test case 2: multiple SGL. Expected: PASS */
     229         [ +  - ]:          6 :         bio.iovpos = 0;
     230         [ +  - ]:          6 :         req.payload_offset = 0;
     231         [ +  - ]:          6 :         req.payload_size = 0x4000;
     232         [ +  + ]:         30 :         for (i = 0; i < 4; i++) {
     233   [ +  -  +  -  :         24 :                 bio.iovs[i].iov_len = 0x1000;
             +  -  +  - ]
     234                 :          4 :         }
     235                 :          6 :         rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
     236   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc == 0);
     237         [ +  - ]:          6 :         CU_ASSERT(bio.iovpos == 4);
     238   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
             +  -  +  - ]
     239   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
             +  -  +  - ]
     240   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
          +  -  +  -  +  
                      - ]
     241   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
                   +  - ]
     242   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
             +  -  +  - ]
     243                 :            :                           struct spdk_nvme_cmd))
     244         [ +  + ]:         30 :         for (i = 0; i < 4; i++) {
     245   [ +  -  +  -  :         24 :                 CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
             +  -  +  - ]
     246   [ +  -  +  -  :         24 :                 CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
             +  -  +  - ]
     247   [ +  -  +  -  :         24 :                 CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     248   [ +  -  +  -  :         24 :                 CU_ASSERT(cmd.sgl[i].keyed.key == RDMA_UT_RKEY);
             +  -  +  - ]
     249   [ +  -  +  -  :         24 :                 CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
     250                 :          4 :         }
     251                 :            : 
     252                 :            :         /* Test case 3: Multiple SGL, SGL 2X mr size. Expected: FAIL */
     253         [ +  - ]:          6 :         bio.iovpos = 0;
     254         [ +  - ]:          6 :         req.payload_offset = 0;
     255                 :          6 :         g_mr_size = 0x800;
     256                 :          6 :         rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
     257   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc != 0);
     258         [ +  - ]:          6 :         CU_ASSERT(bio.iovpos == 1);
     259                 :            : 
     260                 :            :         /* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
     261         [ +  - ]:          6 :         bio.iovpos = 0;
     262         [ +  - ]:          6 :         bio.iovcnt = 4;
     263         [ +  - ]:          6 :         req.payload_offset = 0;
     264         [ +  - ]:          6 :         req.payload_size = 0x6000;
     265                 :          6 :         g_mr_size = 0x0;
     266                 :          6 :         rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
     267   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc != 0);
     268   [ +  -  +  - ]:          6 :         CU_ASSERT(bio.iovpos == bio.iovcnt);
     269         [ +  - ]:          6 :         bio.iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS;
     270                 :            : 
     271                 :            :         /* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
     272   [ -  +  +  -  :          6 :         req.payload_size = 0x1000 + (1 << 24);
                   +  - ]
     273   [ +  -  +  -  :          6 :         bio.iovs[0].iov_len = 0x1000;
                   +  - ]
     274   [ -  +  +  -  :          6 :         bio.iovs[1].iov_len = 1 << 24;
          +  -  +  -  +  
                -  +  - ]
     275                 :          6 :         rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
     276   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc != 0);
     277                 :            : 
     278                 :            :         /* Test case 6: 4 SGL descriptors, size of SGL descriptors exceeds ICD. Expected: FAIL */
     279         [ +  - ]:          6 :         ctrlr.ioccsz_bytes = 60;
     280         [ +  - ]:          6 :         bio.iovpos = 0;
     281         [ +  - ]:          6 :         req.payload_offset = 0;
     282         [ +  - ]:          6 :         req.payload_size = 0x4000;
     283         [ +  + ]:         30 :         for (i = 0; i < 4; i++) {
     284   [ +  -  +  -  :         24 :                 bio.iovs[i].iov_len = 0x1000;
             +  -  +  - ]
     285                 :          4 :         }
     286                 :          6 :         rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
     287   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc == -1);
     288                 :          6 : }
     289                 :            : 
/* Exercises nvme_rdma_build_sgl_inline_request(): inline (in-capsule) data is
 * carried in send_sgl[1], so even a payload larger than the 24-bit keyed-SGL
 * length limit succeeds (case 2). */
static void
test_nvme_rdma_build_sgl_inline_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	/* Sentinel handles; the mocked rdma layer never dereferences them. */
	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	/* send_sgl[0] carries the command, send_sgl[1] the inline payload. */
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS
	 * (the inline path has no 24-bit length restriction). */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	bio.iovs[0].iov_len = 1 << 24;
	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
}
     350                 :            : 
/* Exercises nvme_rdma_build_contig_request(): a contiguous buffer becomes one
 * keyed SGL; payloads that overflow the 24-bit keyed length field must fail. */
static void
test_nvme_rdma_build_contig_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	/* Sentinel handles; the mocked rdma layer never dereferences them. */
	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: contig request. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* Test case 2: SGL length exceeds 3 bytes. Expected: FAIL */
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
}
     393                 :            : 
     394                 :            : static void
     395                 :          6 : test_nvme_rdma_build_contig_inline_request(void)
     396                 :            : {
     397                 :          5 :         struct nvme_rdma_qpair rqpair;
     398                 :          6 :         struct spdk_nvme_ctrlr ctrlr = {0};
     399                 :          6 :         struct spdk_nvmf_cmd cmd = {{0}};
     400                 :          6 :         struct spdk_nvme_rdma_req rdma_req = {0};
     401                 :          6 :         struct nvme_request req = {{0}};
     402                 :          1 :         int rc;
     403                 :            : 
     404         [ +  - ]:          6 :         ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
     405   [ +  -  +  -  :          6 :         ctrlr.cdata.nvmf_specific.msdbd = 16;
                   +  - ]
     406                 :            : 
     407         [ +  - ]:          6 :         rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
     408         [ +  - ]:          6 :         rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
     409                 :          6 :         rqpair.qpair.ctrlr = &ctrlr;
     410         [ +  - ]:          6 :         rqpair.cmds = &cmd;
     411   [ +  -  +  -  :          6 :         cmd.sgl[0].address = 0x1111;
                   +  - ]
     412                 :          6 :         rdma_req.id = 0;
     413         [ +  - ]:          6 :         rdma_req.req = &req;
     414                 :            : 
     415                 :          6 :         req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
     416         [ +  - ]:          6 :         req.qpair = &rqpair.qpair;
     417                 :            : 
     418                 :            :         /* Test case 1: single inline SGL. Expected: PASS */
     419         [ +  - ]:          6 :         req.payload_offset = 0;
     420         [ +  - ]:          6 :         req.payload_size = 0x1000;
     421                 :          6 :         rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
     422   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc == 0);
     423   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
             +  -  +  - ]
     424   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
             +  -  +  - ]
     425   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
          +  -  +  -  +  
                -  +  - ]
     426   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
                   +  - ]
     427   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     428   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
          +  -  +  -  +  
                      - ]
     429   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
          +  -  +  -  +  
                -  +  - ]
     430   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
             +  -  +  - ]
     431                 :            : 
     432                 :            :         /* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
     433         [ +  - ]:          6 :         req.payload_offset = 0;
     434   [ -  +  +  -  :          6 :         req.payload_size = 1 << 24;
                   +  - ]
     435                 :          6 :         rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
     436   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc == 0);
     437   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
             +  -  +  - ]
     438   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
             +  -  +  - ]
     439   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
          +  -  +  -  +  
                -  +  - ]
     440   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
                   +  - ]
     441   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     442   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
          +  -  +  -  +  
                      - ]
     443   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
          +  -  +  -  +  
                -  +  - ]
     444   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
             +  -  +  - ]
     445                 :          6 : }
     446                 :            : 
     447                 :            : static void
     448                 :          6 : test_nvme_rdma_create_reqs(void)
     449                 :            : {
     450                 :          6 :         struct nvme_rdma_qpair rqpair = {};
     451                 :          1 :         int rc;
     452                 :            : 
     453         [ +  + ]:          6 :         memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
     454                 :            : 
     455                 :            :         /* Test case 1: zero entry. Expect: FAIL */
     456         [ +  - ]:          6 :         rqpair.num_entries = 0;
     457                 :            : 
     458                 :          6 :         rc = nvme_rdma_create_reqs(&rqpair);
     459         [ +  - ]:          6 :         CU_ASSERT(rqpair.rdma_reqs == NULL);
     460   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
     461                 :            : 
     462                 :            :         /* Test case 2: single entry. Expect: PASS */
     463         [ +  + ]:          6 :         memset(&rqpair, 0, sizeof(rqpair));
     464         [ +  - ]:          6 :         rqpair.num_entries = 1;
     465                 :            : 
     466                 :          6 :         rc = nvme_rdma_create_reqs(&rqpair);
     467                 :          6 :         CU_ASSERT(rc == 0);
     468   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == g_rdma_mr.lkey);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     469   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
     470                 :            :                   == (uint64_t)&rqpair.cmds[0]);
     471   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     472                 :            :                   == (uint64_t)&rqpair.rdma_reqs[0].rdma_wr);
     473   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_wr.next == NULL);
          +  -  +  -  +  
                      - ]
     474   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_wr.opcode == IBV_WR_SEND);
          +  -  +  -  +  
                      - ]
     475   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_wr.send_flags == IBV_SEND_SIGNALED);
          +  -  +  -  +  
                      - ]
     476   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_wr.sg_list
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     477                 :            :                   == rqpair.rdma_reqs[0].send_sgl);
     478   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.rdma_reqs[0].send_wr.imm_data == 0);
          +  -  +  -  +  
                -  +  - ]
     479         [ +  - ]:          6 :         spdk_free(rqpair.rdma_reqs);
     480         [ +  - ]:          6 :         spdk_free(rqpair.cmds);
     481                 :            : 
     482                 :            :         /* Test case 3: multiple entries. Expect: PASS */
     483         [ +  + ]:          6 :         memset(&rqpair, 0, sizeof(rqpair));
     484         [ +  - ]:          6 :         rqpair.num_entries = 5;
     485                 :            : 
     486                 :          6 :         rc = nvme_rdma_create_reqs(&rqpair);
     487                 :          6 :         CU_ASSERT(rc == 0);
     488   [ +  +  +  - ]:         36 :         for (int i = 0; i < 5; i++) {
     489   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].lkey == g_rdma_mr.lkey);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     490   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
     491                 :            :                           == (uint64_t)&rqpair.cmds[i]);
     492   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     493                 :            :                           == (uint64_t)&rqpair.rdma_reqs[i].rdma_wr);
     494   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_wr.next == NULL);
          +  -  +  -  +  
                      - ]
     495   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_wr.opcode == IBV_WR_SEND);
          +  -  +  -  +  
                      - ]
     496   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_wr.send_flags
          +  -  +  -  +  
                      - ]
     497                 :            :                           == IBV_SEND_SIGNALED);
     498   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_wr.sg_list
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     499                 :            :                           == rqpair.rdma_reqs[i].send_sgl);
     500   [ +  -  +  -  :         30 :                 CU_ASSERT(rqpair.rdma_reqs[i].send_wr.imm_data == 0);
          +  -  +  -  +  
                -  +  - ]
     501                 :          5 :         }
     502         [ +  - ]:          6 :         spdk_free(rqpair.rdma_reqs);
     503         [ +  - ]:          6 :         spdk_free(rqpair.cmds);
     504                 :          6 : }
     505                 :            : 
     506                 :            : static void
     507                 :          6 : test_nvme_rdma_create_rsps(void)
     508                 :            : {
     509                 :          6 :         struct nvme_rdma_rsp_opts opts = {};
     510                 :          1 :         struct nvme_rdma_rsps *rsps;
     511                 :          6 :         struct spdk_rdma_qp *rdma_qp = (struct spdk_rdma_qp *)0xfeedf00d;
     512                 :          6 :         struct nvme_rdma_qpair rqpair = { .rdma_qp = rdma_qp, };
     513                 :            : 
     514         [ +  + ]:          6 :         memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
     515                 :            : 
     516         [ +  - ]:          6 :         opts.rqpair = &rqpair;
     517                 :            : 
     518                 :            :         /* Test case 1 calloc false */
     519                 :          6 :         opts.num_entries = 0;
     520                 :          6 :         rsps = nvme_rdma_create_rsps(&opts);
     521   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rsps == NULL);
     522                 :            : 
     523                 :            :         /* Test case 2 calloc success */
     524                 :          6 :         opts.num_entries = 1;
     525                 :            : 
     526                 :          6 :         rsps = nvme_rdma_create_rsps(&opts);
     527   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rsps != NULL);
     528   [ +  -  +  - ]:          6 :         CU_ASSERT(rsps->rsp_sgls != NULL);
     529   [ +  -  +  - ]:          6 :         CU_ASSERT(rsps->rsp_recv_wrs != NULL);
     530   [ +  -  +  - ]:          6 :         CU_ASSERT(rsps->rsps != NULL);
     531   [ +  -  +  -  :          6 :         CU_ASSERT(rsps->rsp_sgls[0].lkey == g_rdma_mr.lkey);
          +  -  +  -  +  
                -  +  - ]
     532   [ +  -  +  -  :          6 :         CU_ASSERT(rsps->rsp_sgls[0].addr == (uint64_t)&rsps->rsps[0]);
          +  -  +  -  +  
          -  +  -  +  -  
                   +  - ]
     533   [ +  -  +  -  :          6 :         CU_ASSERT(rsps->rsp_recv_wrs[0].wr_id == (uint64_t)&rsps->rsps[0].rdma_wr);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
     534                 :            : 
     535                 :          6 :         nvme_rdma_free_rsps(rsps);
     536                 :          6 : }
     537                 :            : 
     538                 :            : static void
     539                 :          6 : test_nvme_rdma_ctrlr_create_qpair(void)
     540                 :            : {
     541                 :          6 :         struct spdk_nvme_ctrlr ctrlr = {};
     542                 :          1 :         uint16_t qid, qsize;
     543                 :          1 :         struct spdk_nvme_qpair *qpair;
     544                 :          1 :         struct nvme_rdma_qpair *rqpair;
     545                 :            : 
     546                 :            :         /* Test case 1: max qsize. Expect: PASS */
     547                 :          6 :         qsize = 0xffff;
     548                 :          6 :         qid = 1;
     549                 :            : 
     550                 :          6 :         qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
     551                 :            :                                              SPDK_NVME_QPRIO_URGENT, 1,
     552                 :            :                                              false, false);
     553                 :          6 :         CU_ASSERT(qpair != NULL);
     554                 :          6 :         rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
     555         [ +  - ]:          6 :         CU_ASSERT(qpair == &rqpair->qpair);
     556   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair->num_entries == qsize - 1);
                   +  - ]
     557   [ +  +  +  -  :          6 :         CU_ASSERT(rqpair->delay_cmd_submit == false);
                   +  - ]
     558                 :            : 
     559                 :          6 :         spdk_free(rqpair);
     560                 :          6 :         rqpair = NULL;
     561                 :            : 
     562                 :            :         /* Test case 2: queue size 2. Expect: PASS */
     563                 :          6 :         qsize = 2;
     564                 :          6 :         qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
     565                 :            :                                              SPDK_NVME_QPRIO_URGENT, 1,
     566                 :            :                                              false, false);
     567                 :          6 :         CU_ASSERT(qpair != NULL);
     568                 :          6 :         rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
     569   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair->num_entries == qsize - 1);
                   +  - ]
     570                 :            : 
     571                 :          6 :         spdk_free(rqpair);
     572                 :          6 :         rqpair = NULL;
     573                 :            : 
     574                 :            :         /* Test case 3: queue size zero. Expect: FAIL */
     575                 :          6 :         qsize = 0;
     576                 :            : 
     577                 :          6 :         qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
     578                 :            :                                              SPDK_NVME_QPRIO_URGENT, 1,
     579                 :            :                                              false, false);
     580   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(qpair == NULL);
     581                 :            : 
     582                 :            :         /* Test case 4: queue size 1. Expect: FAIL */
     583                 :          6 :         qsize = 1;
     584                 :          6 :         qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
     585                 :            :                                              SPDK_NVME_QPRIO_URGENT, 1,
     586                 :            :                                              false, false);
     587   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(qpair == NULL);
     588                 :          6 : }
     589                 :            : 
/* Stub out ibv CQ create/destroy so the poller tests below run without a real
 * RDMA device; create returns a non-NULL sentinel the tests assert against. */
DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe, void *cq_context,
		struct ibv_comp_channel *channel, int comp_vector), (struct ibv_cq *)0xFEEDBEEF);
DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
     593                 :            : 
     594                 :            : static void
     595                 :          6 : test_nvme_rdma_poller_create(void)
     596                 :            : {
     597                 :          6 :         struct nvme_rdma_poll_group     group = {};
     598                 :          6 :         struct ibv_context context = {
     599                 :            :                 .device = (struct ibv_device *)0xDEADBEEF
     600                 :            :         };
     601                 :          6 :         struct ibv_context context_2 = {
     602                 :            :                 .device = (struct ibv_device *)0xBAADBEEF
     603                 :            :         };
     604                 :          1 :         struct nvme_rdma_poller *poller_1, *poller_2, *poller_3;
     605                 :            : 
     606                 :            :         /* Case: calloc and ibv not need to fail test */
     607   [ +  -  +  -  :          6 :         STAILQ_INIT(&group.pollers);
          +  -  +  -  +  
                      - ]
     608                 :            : 
     609                 :          6 :         poller_1 = nvme_rdma_poll_group_get_poller(&group, &context);
     610   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(poller_1 != NULL);
     611         [ +  - ]:          6 :         CU_ASSERT(group.num_pollers == 1);
     612   [ +  -  +  - ]:          6 :         CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_1);
     613   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_1->refcnt == 1);
     614   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_1->device == &context);
     615   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_1->cq == (struct ibv_cq *)0xFEEDBEEF);
     616   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
     617   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_1->required_num_wc == 0);
     618                 :            : 
     619                 :          6 :         poller_2 = nvme_rdma_poll_group_get_poller(&group, &context_2);
     620   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(poller_2 != NULL);
     621         [ +  - ]:          6 :         CU_ASSERT(group.num_pollers == 2);
     622   [ +  -  +  - ]:          6 :         CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_2);
     623   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_2->refcnt == 1);
     624   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_2->device == &context_2);
     625                 :            : 
     626                 :          6 :         poller_3 = nvme_rdma_poll_group_get_poller(&group, &context);
     627   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(poller_3 != NULL);
     628                 :          6 :         CU_ASSERT(poller_3 == poller_1);
     629         [ +  - ]:          6 :         CU_ASSERT(group.num_pollers == 2);
     630   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_3->refcnt == 2);
     631                 :            : 
     632                 :          6 :         nvme_rdma_poll_group_put_poller(&group, poller_2);
     633         [ +  - ]:          6 :         CU_ASSERT(group.num_pollers == 1);
     634                 :            : 
     635                 :          6 :         nvme_rdma_poll_group_put_poller(&group, poller_1);
     636         [ +  - ]:          6 :         CU_ASSERT(group.num_pollers == 1);
     637   [ +  -  +  - ]:          6 :         CU_ASSERT(poller_3->refcnt == 1);
     638                 :            : 
     639                 :          6 :         nvme_rdma_poll_group_put_poller(&group, poller_3);
     640   [ +  -  +  - ]:          6 :         CU_ASSERT(STAILQ_EMPTY(&group.pollers));
     641         [ +  - ]:          6 :         CU_ASSERT(group.num_pollers == 0);
     642                 :            : 
     643                 :          6 :         nvme_rdma_poll_group_free_pollers(&group);
     644                 :          6 : }
     645                 :            : 
     646                 :            : static void
     647                 :          6 : test_nvme_rdma_qpair_process_cm_event(void)
     648                 :            : {
     649                 :          6 :         struct nvme_rdma_qpair rqpair = {};
     650                 :          6 :         struct rdma_cm_event     event = {};
     651                 :          6 :         struct spdk_nvmf_rdma_accept_private_data       accept_data = {};
     652                 :          6 :         int rc = 0;
     653                 :            : 
     654                 :            :         /* case1: event == RDMA_CM_EVENT_ADDR_RESOLVED */
     655         [ +  - ]:          6 :         rqpair.evt = &event;
     656         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
     657                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     658                 :          6 :         CU_ASSERT(rc == 0);
     659                 :            : 
     660                 :            :         /* case2: event == RDMA_CM_EVENT_CONNECT_REQUEST */
     661         [ +  - ]:          6 :         rqpair.evt = &event;
     662         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
     663                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     664                 :          6 :         CU_ASSERT(rc == 0);
     665                 :            : 
     666                 :            :         /* case3: event == RDMA_CM_EVENT_CONNECT_ERROR */
     667         [ +  - ]:          6 :         rqpair.evt = &event;
     668         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_CONNECT_ERROR;
     669                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     670                 :          6 :         CU_ASSERT(rc == 0);
     671                 :            : 
     672                 :            :         /* case4: event == RDMA_CM_EVENT_UNREACHABLE */
     673         [ +  - ]:          6 :         rqpair.evt = &event;
     674         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_UNREACHABLE;
     675                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     676                 :          6 :         CU_ASSERT(rc == 0);
     677                 :            : 
     678                 :            :         /* case5: event == RDMA_CM_EVENT_CONNECT_RESPONSE */
     679         [ +  - ]:          6 :         rqpair.evt = &event;
     680         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
     681   [ +  -  +  -  :          6 :         event.param.conn.private_data = NULL;
                   +  - ]
     682                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     683                 :          6 :         CU_ASSERT(rc == -1);
     684                 :            : 
     685         [ +  - ]:          6 :         rqpair.evt = &event;
     686         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
     687   [ +  -  +  -  :          6 :         event.param.conn.private_data = &accept_data;
                   +  - ]
     688                 :          6 :         accept_data.crqsize = 512;
     689         [ +  - ]:          6 :         rqpair.num_entries = 1024;
     690                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     691                 :          6 :         CU_ASSERT(rc == 0);
     692         [ +  - ]:          6 :         CU_ASSERT(rqpair.num_entries == 1024);
     693                 :            : 
     694                 :            :         /* case6: event == RDMA_CM_EVENT_DISCONNECTED */
     695         [ +  - ]:          6 :         rqpair.evt = &event;
     696         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_DISCONNECTED;
     697                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     698                 :          6 :         CU_ASSERT(rc == 0);
     699                 :          6 :         CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_REMOTE);
     700                 :            : 
     701                 :            :         /* case7: event == RDMA_CM_EVENT_DEVICE_REMOVAL */
     702         [ +  - ]:          6 :         rqpair.evt = &event;
     703         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
     704                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     705                 :          6 :         CU_ASSERT(rc == 0);
     706                 :          6 :         CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
     707                 :            : 
     708                 :            :         /* case8: event == RDMA_CM_EVENT_MULTICAST_JOIN */
     709         [ +  - ]:          6 :         rqpair.evt = &event;
     710         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
     711                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     712                 :          6 :         CU_ASSERT(rc == 0);
     713                 :            : 
     714                 :            :         /* case9: event == RDMA_CM_EVENT_ADDR_CHANGE */
     715         [ +  - ]:          6 :         rqpair.evt = &event;
     716         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_ADDR_CHANGE;
     717                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     718                 :          6 :         CU_ASSERT(rc == 0);
     719                 :          6 :         CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
     720                 :            : 
     721                 :            :         /* case10: event == RDMA_CM_EVENT_TIMEWAIT_EXIT */
     722         [ +  - ]:          6 :         rqpair.evt = &event;
     723         [ +  - ]:          6 :         event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
     724                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     725                 :          6 :         CU_ASSERT(rc == 0);
     726                 :            : 
     727                 :            :         /* case11: default event == 0xFF */
     728         [ +  - ]:          6 :         rqpair.evt = &event;
     729         [ +  - ]:          6 :         event.event = 0xFF;
     730                 :          6 :         rc = nvme_rdma_qpair_process_cm_event(&rqpair);
     731                 :          6 :         CU_ASSERT(rc == 0);
     732                 :          6 : }
     733                 :            : 
     734                 :            : static void
     735                 :          6 : test_nvme_rdma_ctrlr_construct(void)
     736                 :            : {
     737                 :          1 :         struct spdk_nvme_ctrlr *ctrlr;
     738                 :          6 :         struct spdk_nvme_transport_id trid = {};
     739                 :          6 :         struct spdk_nvme_ctrlr_opts opts = {};
     740                 :          6 :         struct nvme_rdma_qpair *rqpair = NULL;
     741                 :          6 :         struct nvme_rdma_ctrlr *rctrlr = NULL;
     742                 :          6 :         struct rdma_event_channel cm_channel = {};
     743                 :          6 :         void *devhandle = NULL;
     744                 :          1 :         int rc;
     745                 :            : 
     746         [ +  - ]:          6 :         opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT + 1;
     747                 :          6 :         opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
     748         [ +  - ]:          6 :         opts.admin_queue_size = 0xFFFF;
     749         [ +  - ]:          6 :         trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
     750         [ +  - ]:          6 :         trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
     751                 :          6 :         MOCK_SET(rdma_create_event_channel, &cm_channel);
     752                 :            : 
     753                 :          6 :         ctrlr = nvme_rdma_ctrlr_construct(&trid, &opts, devhandle);
     754   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
     755   [ +  -  +  -  :          6 :         CU_ASSERT(ctrlr->opts.transport_retry_count ==
                   +  - ]
     756                 :            :                   NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
     757   [ +  -  +  -  :          6 :         CU_ASSERT(ctrlr->opts.transport_ack_timeout ==
                   +  - ]
     758                 :            :                   NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
     759   [ +  -  +  -  :          6 :         CU_ASSERT(ctrlr->opts.admin_queue_size == opts.admin_queue_size);
             +  -  +  - ]
     760                 :          6 :         rctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
     761   [ +  -  +  - ]:          6 :         CU_ASSERT(rctrlr->max_sge == NVME_RDMA_MAX_SGL_DESCRIPTORS);
     762   [ +  -  +  - ]:          6 :         CU_ASSERT(rctrlr->cm_channel == &cm_channel);
     763   [ +  +  +  -  :          6 :         CU_ASSERT(!strncmp((char *)&rctrlr->ctrlr.trid,
             +  -  +  - ]
     764                 :            :                            (char *)&trid, sizeof(trid)));
     765                 :            : 
     766   [ +  +  +  -  :          6 :         SPDK_CU_ASSERT_FATAL(ctrlr->adminq != NULL);
             +  -  #  # ]
     767   [ +  -  +  - ]:          6 :         rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
     768   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair->num_entries == opts.admin_queue_size - 1);
             +  -  +  - ]
     769   [ +  +  +  -  :          6 :         CU_ASSERT(rqpair->delay_cmd_submit == false);
                   +  - ]
     770                 :          6 :         MOCK_CLEAR(rdma_create_event_channel);
     771                 :            : 
     772                 :            :         /* Hardcode the trtype, because nvme_qpair_init() is stub function. */
     773   [ +  -  +  -  :          6 :         rqpair->qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
                   +  - ]
     774                 :          6 :         rc = nvme_rdma_ctrlr_destruct(ctrlr);
     775                 :          6 :         CU_ASSERT(rc == 0);
     776                 :          6 : }
     777                 :            : 
     778                 :            : static void
     779                 :          6 : test_nvme_rdma_req_put_and_get(void)
     780                 :            : {
     781                 :          6 :         struct nvme_rdma_qpair rqpair = {};
     782                 :          6 :         struct spdk_nvme_rdma_req rdma_req = {};
     783                 :          1 :         struct spdk_nvme_rdma_req *rdma_req_get;
     784                 :            : 
     785                 :            :         /* case 1: nvme_rdma_req_put */
     786   [ +  -  +  -  :          6 :         TAILQ_INIT(&rqpair.free_reqs);
          +  -  +  -  +  
                      - ]
     787                 :          6 :         rdma_req.completion_flags = 1;
     788         [ +  - ]:          6 :         rdma_req.req = (struct nvme_request *)0xDEADBEFF;
     789                 :          6 :         rdma_req.id = 10086;
     790                 :          6 :         nvme_rdma_req_put(&rqpair, &rdma_req);
     791                 :            : 
     792   [ +  -  +  - ]:          6 :         CU_ASSERT(rqpair.free_reqs.tqh_first == &rdma_req);
     793   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.free_reqs.tqh_first->completion_flags == 0);
                   +  - ]
     794   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.free_reqs.tqh_first->req == NULL);
             +  -  +  - ]
     795   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.free_reqs.tqh_first->id == 10086);
             +  -  +  - ]
     796                 :          6 :         CU_ASSERT(rdma_req.completion_flags == 0);
     797         [ +  - ]:          6 :         CU_ASSERT(rdma_req.req == NULL);
     798                 :            : 
     799                 :            :         /* case 2: nvme_rdma_req_get */
     800                 :          6 :         rdma_req_get = nvme_rdma_req_get(&rqpair);
     801                 :          6 :         CU_ASSERT(rdma_req_get == &rdma_req);
     802   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req_get->id == 10086);
     803   [ +  -  +  - ]:          6 :         CU_ASSERT(rqpair.free_reqs.tqh_first == NULL);
     804                 :          6 : }
     805                 :            : 
     806                 :            : static void
     807                 :          6 : test_nvme_rdma_req_init(void)
     808                 :            : {
     809                 :          6 :         struct nvme_rdma_qpair rqpair = {};
     810                 :          6 :         struct spdk_nvme_ctrlr ctrlr = {};
     811                 :          6 :         struct spdk_nvmf_cmd cmd = {};
     812                 :          6 :         struct spdk_nvme_rdma_req rdma_req = {};
     813                 :          6 :         struct nvme_request req = {};
     814                 :          6 :         struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
     815                 :          6 :         int rc = 1;
     816                 :            : 
     817         [ +  - ]:          6 :         ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
     818   [ +  -  +  -  :          6 :         ctrlr.cdata.nvmf_specific.msdbd = 16;
                   +  - ]
     819                 :            : 
     820         [ +  - ]:          6 :         rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
     821         [ +  - ]:          6 :         rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
     822                 :          6 :         rqpair.qpair.ctrlr = &ctrlr;
     823         [ +  - ]:          6 :         rqpair.cmds = &cmd;
     824   [ +  -  +  -  :          6 :         cmd.sgl[0].address = 0x1111;
                   +  - ]
     825                 :          6 :         rdma_req.id = 0;
     826                 :          6 :         req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
     827                 :            : 
     828                 :          6 :         req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
     829                 :            :         /* case 1: req->payload_size == 0, expect: pass. */
     830         [ +  - ]:          6 :         req.payload_size = 0;
     831   [ +  -  +  - ]:          6 :         rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
     832   [ +  -  +  - ]:          6 :         rqpair.qpair.ctrlr->icdoff = 0;
     833                 :          6 :         rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
     834                 :          6 :         CU_ASSERT(rc == 0);
     835                 :          6 :         CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
     836   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     837   [ +  -  +  - ]:          6 :         CU_ASSERT(rdma_req.send_wr.num_sge == 1);
     838   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
             +  -  +  - ]
     839   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
             +  -  +  - ]
     840   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == 0);
             +  -  +  - ]
     841   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == 0);
             +  -  +  - ]
     842   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
                   +  - ]
     843                 :            : 
     844                 :            :         /* case 2: payload_type == NVME_PAYLOAD_TYPE_CONTIG, expect: pass. */
     845                 :            :         /* icd_supported is true */
     846         [ +  - ]:          6 :         rdma_req.req = NULL;
     847   [ +  -  +  - ]:          6 :         rqpair.qpair.ctrlr->icdoff = 0;
     848         [ +  - ]:          6 :         req.payload_offset = 0;
     849         [ +  - ]:          6 :         req.payload_size = 1024;
     850                 :          6 :         req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
     851                 :          6 :         rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
     852                 :          6 :         CU_ASSERT(rc == 0);
     853   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
             +  -  +  - ]
     854   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
             +  -  +  - ]
     855   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
          +  -  +  -  +  
                -  +  - ]
     856   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
                   +  - ]
     857   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     858   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
          +  -  +  -  +  
                      - ]
     859   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
          +  -  +  -  +  
                -  +  - ]
     860   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
             +  -  +  - ]
     861                 :            : 
     862                 :            :         /* icd_supported is false */
     863         [ +  - ]:          6 :         rdma_req.req = NULL;
     864   [ +  -  +  - ]:          6 :         rqpair.qpair.ctrlr->icdoff = 1;
     865         [ +  - ]:          6 :         req.payload_offset = 0;
     866         [ +  - ]:          6 :         req.payload_size = 1024;
     867                 :          6 :         req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
     868                 :          6 :         rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
     869                 :          6 :         CU_ASSERT(rc == 0);
     870   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
             +  -  +  - ]
     871   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
             +  -  +  - ]
     872   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
          +  -  +  -  +  
                      - ]
     873   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
             +  -  +  - ]
     874   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
          +  -  +  -  +  
                      - ]
     875   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     876                 :            : 
     877                 :            :         /* case 3: payload_type == NVME_PAYLOAD_TYPE_SGL, expect: pass. */
     878                 :            :         /* icd_supported is true */
     879         [ +  - ]:          6 :         rdma_req.req = NULL;
     880   [ +  -  +  - ]:          6 :         rqpair.qpair.ctrlr->icdoff = 0;
     881                 :          6 :         req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
     882         [ +  - ]:          6 :         req.qpair = &rqpair.qpair;
     883         [ +  - ]:          6 :         bio.iovpos = 0;
     884         [ +  - ]:          6 :         req.payload_offset = 0;
     885         [ +  - ]:          6 :         req.payload_size = 1024;
     886   [ +  -  +  - ]:          6 :         bio.iovs[0].iov_base = (void *)0xdeadbeef;
     887   [ +  -  +  -  :          6 :         bio.iovs[0].iov_len = 1024;
                   +  - ]
     888                 :          6 :         rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
     889                 :          6 :         CU_ASSERT(rc == 0);
     890         [ +  - ]:          6 :         CU_ASSERT(bio.iovpos == 1);
     891   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
             +  -  +  - ]
     892   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
             +  -  +  - ]
     893   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
          +  -  +  -  +  
                -  +  - ]
     894   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
                   +  - ]
     895   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     896   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
          +  -  +  -  +  
                      - ]
     897   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
          +  -  +  -  +  
                -  +  - ]
     898   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
             +  -  +  - ]
     899                 :            : 
     900                 :            :         /* icd_supported is false */
     901         [ +  - ]:          6 :         rdma_req.req = NULL;
     902   [ +  -  +  - ]:          6 :         rqpair.qpair.ctrlr->icdoff = 1;
     903                 :          6 :         req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
     904         [ +  - ]:          6 :         req.qpair = &rqpair.qpair;
     905         [ +  - ]:          6 :         bio.iovpos = 0;
     906         [ +  - ]:          6 :         req.payload_offset = 0;
     907         [ +  - ]:          6 :         req.payload_size = 1024;
     908   [ +  -  +  - ]:          6 :         bio.iovs[0].iov_base = (void *)0xdeadbeef;
     909   [ +  -  +  -  :          6 :         bio.iovs[0].iov_len = 1024;
                   +  - ]
     910                 :          6 :         rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
     911                 :          6 :         CU_ASSERT(rc == 0);
     912         [ +  - ]:          6 :         CU_ASSERT(bio.iovpos == 1);
     913   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
             +  -  +  - ]
     914   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
             +  -  +  - ]
     915   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
          +  -  +  -  +  
                      - ]
     916   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
             +  -  +  - ]
     917   [ +  -  +  -  :          6 :         CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
          +  -  +  -  +  
                      - ]
     918   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
             +  -  +  - ]
     919                 :          6 : }
     920                 :            : 
     921                 :            : static void
     922                 :          6 : test_nvme_rdma_validate_cm_event(void)
     923                 :            : {
     924                 :          1 :         enum rdma_cm_event_type expected_evt_type;
     925                 :          6 :         struct rdma_cm_event reaped_evt = {};
     926                 :          1 :         int rc;
     927                 :            : 
     928                 :            :         /* case 1: expected_evt_type == reaped_evt->event, expect: pass */
     929                 :          6 :         expected_evt_type = RDMA_CM_EVENT_ADDR_RESOLVED;
     930         [ +  - ]:          6 :         reaped_evt.event = RDMA_CM_EVENT_ADDR_RESOLVED;
     931                 :            : 
     932                 :          6 :         rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
     933                 :          6 :         CU_ASSERT(rc == 0);
     934                 :            : 
     935                 :            :         /* case 2: expected_evt_type != RDMA_CM_EVENT_ESTABLISHED and is not equal to reaped_evt->event, expect: fail */
     936         [ +  - ]:          6 :         reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
     937                 :            : 
     938                 :          6 :         rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
     939                 :          6 :         CU_ASSERT(rc == -EBADMSG);
     940                 :            : 
     941                 :            :         /* case 3: expected_evt_type == RDMA_CM_EVENT_ESTABLISHED */
     942                 :          6 :         expected_evt_type = RDMA_CM_EVENT_ESTABLISHED;
     943                 :            :         /* reaped_evt->event == RDMA_CM_EVENT_REJECTED and reaped_evt->status == 10, expect: fail */
     944         [ +  - ]:          6 :         reaped_evt.event = RDMA_CM_EVENT_REJECTED;
     945         [ +  - ]:          6 :         reaped_evt.status = 10;
     946                 :            : 
     947                 :          6 :         rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
     948                 :          6 :         CU_ASSERT(rc == -ESTALE);
     949                 :            : 
     950                 :            :         /* reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE, expect: pass */
     951         [ +  - ]:          6 :         reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
     952                 :            : 
     953                 :          6 :         rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
     954                 :          6 :         CU_ASSERT(rc == 0);
     955                 :          6 : }
     956                 :            : 
     957                 :            : static void
     958                 :          6 : test_nvme_rdma_qpair_init(void)
     959                 :            : {
     960                 :          6 :         struct nvme_rdma_qpair          rqpair = {};
     961                 :          6 :         struct rdma_cm_id                cm_id = {};
     962                 :          6 :         struct ibv_pd                           *pd = (struct ibv_pd *)0xfeedbeef;
     963                 :          6 :         struct ibv_qp                           qp = { .pd = pd };
     964                 :          6 :         struct nvme_rdma_ctrlr  rctrlr = {};
     965                 :          6 :         int rc = 0;
     966                 :            : 
     967   [ +  -  +  - ]:          6 :         rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
     968         [ +  - ]:          6 :         rqpair.cm_id = &cm_id;
     969                 :          6 :         g_nvme_hooks.get_ibv_pd = NULL;
     970         [ +  - ]:          6 :         rqpair.qpair.poll_group = NULL;
     971                 :          6 :         rqpair.qpair.ctrlr = &rctrlr.ctrlr;
     972                 :          6 :         g_spdk_rdma_qp.qp = &qp;
     973                 :          6 :         MOCK_SET(spdk_rdma_get_pd, pd);
     974                 :            : 
     975                 :          6 :         rc = nvme_rdma_qpair_init(&rqpair);
     976                 :          6 :         CU_ASSERT(rc == 0);
     977                 :            : 
     978   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.cm_id->context == &rqpair.qpair);
                   +  - ]
     979         [ +  - ]:          6 :         CU_ASSERT(rqpair.max_send_sge == NVME_RDMA_DEFAULT_TX_SGE);
     980         [ +  - ]:          6 :         CU_ASSERT(rqpair.max_recv_sge == NVME_RDMA_DEFAULT_RX_SGE);
     981         [ +  - ]:          6 :         CU_ASSERT(rqpair.current_num_sends == 0);
     982         [ +  - ]:          6 :         CU_ASSERT(rqpair.cq == (struct ibv_cq *)0xFEEDBEEF);
     983         [ +  - ]:          6 :         CU_ASSERT(rqpair.memory_domain != NULL);
     984                 :            : 
     985                 :          6 :         MOCK_CLEAR(spdk_rdma_get_pd);
     986                 :          6 : }
     987                 :            : 
     988                 :            : static void
     989                 :          6 : test_nvme_rdma_qpair_submit_request(void)
     990                 :            : {
     991                 :          1 :         int                             rc;
     992                 :          6 :         struct nvme_rdma_qpair          rqpair = {};
     993                 :          6 :         struct spdk_nvme_ctrlr          ctrlr = {};
     994                 :          6 :         struct nvme_request             req = {};
     995                 :          6 :         struct nvme_rdma_poller         poller = {};
     996                 :          6 :         struct spdk_nvme_rdma_req       *rdma_req = NULL;
     997                 :            : 
     998                 :          6 :         req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
     999                 :          6 :         req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
    1000         [ +  - ]:          6 :         req.payload_size = 0;
    1001         [ +  - ]:          6 :         rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
    1002         [ +  - ]:          6 :         rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
    1003                 :          6 :         rqpair.qpair.ctrlr = &ctrlr;
    1004         [ +  - ]:          6 :         rqpair.num_entries = 1;
    1005         [ +  - ]:          6 :         rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
    1006         [ +  - ]:          6 :         rqpair.poller = &poller;
    1007                 :            : 
    1008                 :          6 :         rc = nvme_rdma_create_reqs(&rqpair);
    1009                 :          6 :         CU_ASSERT(rc == 0);
    1010                 :            :         /* Give send_wr.next a non null value */
    1011   [ +  -  +  - ]:          6 :         rdma_req = TAILQ_FIRST(&rqpair.free_reqs);
    1012   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(rdma_req != NULL);
    1013   [ +  -  +  -  :          6 :         rdma_req->send_wr.next = (void *)0xdeadbeef;
                   +  - ]
    1014                 :            : 
    1015                 :          6 :         rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
    1016                 :          6 :         CU_ASSERT(rc == 0);
    1017         [ +  - ]:          6 :         CU_ASSERT(rqpair.current_num_sends == 1);
    1018   [ +  -  +  -  :          6 :         CU_ASSERT(rdma_req->send_wr.next == NULL);
                   +  - ]
    1019   [ +  +  +  -  :          6 :         TAILQ_REMOVE(&rqpair.outstanding_reqs, rdma_req, link);
          +  -  -  +  #  
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  #  #  #  
          #  #  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                      - ]
    1020   [ +  -  +  - ]:          6 :         CU_ASSERT(TAILQ_EMPTY(&rqpair.outstanding_reqs));
    1021                 :            : 
    1022                 :            :         /* No request available */
    1023                 :          6 :         rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
    1024                 :          6 :         CU_ASSERT(rc == -EAGAIN);
    1025   [ +  -  +  -  :          6 :         CU_ASSERT(rqpair.poller->stats.queued_requests == 1);
             +  -  +  - ]
    1026                 :            : 
    1027                 :          6 :         nvme_rdma_free_reqs(&rqpair);
    1028                 :          6 : }
    1029                 :            : 
    1030                 :            : static void
    1031                 :          6 : test_nvme_rdma_memory_domain(void)
    1032                 :            : {
    1033                 :          6 :         struct nvme_rdma_memory_domain *domain_1 = NULL, *domain_2 = NULL, *domain_tmp;
    1034                 :          6 :         struct ibv_pd *pd_1 = (struct ibv_pd *)0x1, *pd_2 = (struct ibv_pd *)0x2;
    1035                 :            :         /* Counters below are used to check the number of created/destroyed rdma_dma_device objects.
    1036                 :            :          * Since other unit tests may create dma_devices, we can't just check that the queue is empty or not */
    1037                 :          6 :         uint32_t dma_dev_count_start = 0, dma_dev_count = 0, dma_dev_count_end = 0;
    1038                 :            : 
    1039   [ +  +  +  -  :         12 :         TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
             +  -  +  - ]
    1040                 :          6 :                 dma_dev_count_start++;
    1041                 :          1 :         }
    1042                 :            : 
    1043                 :            :         /* spdk_memory_domain_create failed, expect fail */
    1044                 :          6 :         MOCK_SET(spdk_memory_domain_create, -1);
    1045                 :          6 :         domain_1 = nvme_rdma_get_memory_domain(pd_1);
    1046                 :          6 :         CU_ASSERT(domain_1 == NULL);
    1047                 :          6 :         MOCK_CLEAR(spdk_memory_domain_create);
    1048                 :            : 
    1049                 :            :         /* Normal scenario */
    1050                 :          6 :         domain_1 = nvme_rdma_get_memory_domain(pd_1);
    1051   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(domain_1 != NULL);
    1052   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_1->domain != NULL);
    1053   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_1->pd == pd_1);
    1054   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_1->ref == 1);
    1055                 :            : 
    1056                 :            :         /* Request the same pd, ref counter increased */
    1057                 :          6 :         CU_ASSERT(nvme_rdma_get_memory_domain(pd_1) == domain_1);
    1058   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_1->ref == 2);
    1059                 :            : 
    1060                 :            :         /* Request another pd */
    1061                 :          6 :         domain_2 = nvme_rdma_get_memory_domain(pd_2);
    1062   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(domain_2 != NULL);
    1063   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_2->domain != NULL);
    1064   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_2->pd == pd_2);
    1065   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_2->ref == 1);
    1066                 :            : 
    1067   [ +  +  +  -  :         24 :         TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
             +  -  +  - ]
    1068                 :         18 :                 dma_dev_count++;
    1069                 :          3 :         }
    1070                 :          6 :         CU_ASSERT(dma_dev_count == dma_dev_count_start + 2);
    1071                 :            : 
    1072                 :            :         /* put domain_1, decrement refcount */
    1073                 :          6 :         nvme_rdma_put_memory_domain(domain_1);
    1074                 :            : 
    1075                 :            :         /* Release both devices */
    1076   [ +  -  +  - ]:          6 :         CU_ASSERT(domain_2->ref == 1);
    1077                 :          6 :         nvme_rdma_put_memory_domain(domain_1);
    1078                 :          6 :         nvme_rdma_put_memory_domain(domain_2);
    1079                 :            : 
    1080   [ +  +  +  -  :         12 :         TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
             +  -  +  - ]
    1081                 :          6 :                 dma_dev_count_end++;
    1082                 :          1 :         }
    1083                 :          6 :         CU_ASSERT(dma_dev_count_start == dma_dev_count_end);
    1084                 :          6 : }
    1085                 :            : 
    1086                 :            : static void
test_rdma_ctrlr_get_memory_domains(void)
{
	/* Verify nvme_rdma_ctrlr_get_memory_domains(): it always reports one
	 * domain (the admin qpair's) and only writes into the output array
	 * when both the array pointer and a non-zero size are supplied. */
	struct nvme_rdma_ctrlr rctrlr = {};
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
	struct nvme_rdma_memory_domain rdma_domain = { .domain = domain };
	struct spdk_memory_domain *domains[1] = {NULL};

	rqpair.memory_domain = &rdma_domain;
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rctrlr.ctrlr.adminq = &rqpair.qpair;

	/* Test 1, input domains pointer is NULL */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 1) == 1);

	/* Test 2, input array_size is 0: count is returned, array untouched */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 0) == 1);
	CU_ASSERT(domains[0] == NULL);

	/* Test 3, both input domains pointer and array_size are NULL/0 */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 0) == 1);

	/* Test 4, input parameters are valid: the admin qpair's domain is returned */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 1) == 1);
	CU_ASSERT(domains[0] == domain);
}
    1113                 :            : 
/* Exercise nvme_rdma_get_memory_translation() for both translation paths:
 * through a caller-supplied memory domain (extended IO opts present) and
 * through the default RDMA memory-region translation. Mock state in
 * g_memory_translation_translation drives the memory-domain path. */
static void
test_rdma_get_memory_translation(void)
{
	struct ibv_qp qp = {.pd = (struct ibv_pd *) 0xfeedbeef};
	struct spdk_rdma_qp rdma_qp = {.qp = &qp};
	struct nvme_rdma_qpair rqpair = {.rdma_qp = &rdma_qp};
	struct spdk_nvme_ns_cmd_ext_io_opts io_opts = {
		.memory_domain = (struct spdk_memory_domain *) 0xdeaddead
	};
	struct nvme_request req = {.payload = {.opts = &io_opts}};
	struct nvme_rdma_memory_translation_ctx ctx = {
		.addr = (void *) 0xBAADF00D,
		.length = 0x100
	};
	int rc;

	rqpair.memory_domain = nvme_rdma_get_memory_domain(rqpair.rdma_qp->qp->pd);
	SPDK_CU_ASSERT_FATAL(rqpair.memory_domain != NULL);

	/* case 1, using extended IO opts with DMA device.
	 * Test 1 - spdk_dma_translate_data error, expect fail */
	MOCK_SET(spdk_memory_domain_translate_data, -1);
	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc != 0);
	MOCK_CLEAR(spdk_memory_domain_translate_data);

	/* Test 2 - expect pass: ctx must be rewritten with the translated
	 * address/length and the lkey/rkey coming from the mocked result */
	g_memory_translation_translation.iov_count = 1;
	g_memory_translation_translation.iov.iov_base = ctx.addr + 1;
	g_memory_translation_translation.iov.iov_len = ctx.length;
	g_memory_translation_translation.rdma.lkey = 123;
	g_memory_translation_translation.rdma.rkey = 321;

	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctx.lkey == g_memory_translation_translation.rdma.lkey);
	CU_ASSERT(ctx.rkey == g_memory_translation_translation.rdma.rkey);
	CU_ASSERT(ctx.addr == g_memory_translation_translation.iov.iov_base);
	CU_ASSERT(ctx.length == g_memory_translation_translation.iov.iov_len);

	/* case 2, using rdma translation (no extended IO opts).
	 * Test 1 - spdk_rdma_get_translation error, expect fail */
	req.payload.opts = NULL;
	MOCK_SET(spdk_rdma_get_translation, -1);
	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc != 0);
	MOCK_CLEAR(spdk_rdma_get_translation);

	/* Test 2 - expect pass: keys come from the test_rdma stub constants */
	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctx.lkey == RDMA_UT_LKEY);
	CU_ASSERT(ctx.rkey == RDMA_UT_RKEY);

	/* Cleanup */
	nvme_rdma_put_memory_domain(rqpair.memory_domain);
}
    1171                 :            : 
    1172                 :            : static void
    1173                 :          6 : test_get_rdma_qpair_from_wc(void)
    1174                 :            : {
    1175                 :          6 :         const uint32_t test_qp_num = 123;
    1176                 :          6 :         struct nvme_rdma_poll_group     group = {};
    1177                 :          6 :         struct nvme_rdma_qpair rqpair = {};
    1178                 :          6 :         struct spdk_rdma_qp rdma_qp = {};
    1179                 :          6 :         struct ibv_qp qp = { .qp_num = test_qp_num };
    1180                 :          6 :         struct ibv_wc wc = { .qp_num = test_qp_num };
    1181                 :            : 
    1182   [ +  -  +  -  :          6 :         STAILQ_INIT(&group.group.disconnected_qpairs);
          +  -  +  -  +  
                      - ]
    1183   [ +  -  +  -  :          6 :         STAILQ_INIT(&group.group.connected_qpairs);
          +  -  +  -  +  
                      - ]
    1184         [ +  - ]:          6 :         rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
    1185                 :            : 
    1186                 :            :         /* Test 1 - Simulate case when nvme_rdma_qpair is disconnected but still in one of lists.
    1187                 :            :          * get_rdma_qpair_from_wc must return NULL */
    1188   [ +  -  +  -  :          6 :         STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                      - ]
    1189                 :          6 :         CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
    1190   [ +  -  +  -  :          6 :         STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                -  +  - ]
    1191                 :            : 
    1192   [ +  -  +  -  :          6 :         STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                      - ]
    1193                 :          6 :         CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
    1194   [ +  -  +  -  :          6 :         STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                -  +  - ]
    1195                 :            : 
    1196                 :            :         /* Test 2 - nvme_rdma_qpair with valid rdma_qp/ibv_qp and qp_num */
    1197                 :          6 :         rdma_qp.qp = &qp;
    1198         [ +  - ]:          6 :         rqpair.rdma_qp = &rdma_qp;
    1199                 :            : 
    1200   [ +  -  +  -  :          6 :         STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                      - ]
    1201                 :          6 :         CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
    1202   [ +  -  +  -  :          6 :         STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                -  +  - ]
    1203                 :            : 
    1204   [ +  -  +  -  :          6 :         STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                      - ]
    1205                 :          6 :         CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
    1206   [ +  -  +  -  :          6 :         STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
          +  -  +  -  +  
          -  +  -  +  -  
          +  -  +  -  +  
                -  +  - ]
    1207                 :          6 : }
    1208                 :            : 
    1209                 :            : static void
    1210                 :          6 : test_nvme_rdma_ctrlr_get_max_sges(void)
    1211                 :            : {
    1212                 :          6 :         struct nvme_rdma_ctrlr  rctrlr = {};
    1213                 :            : 
    1214   [ +  -  +  - ]:          6 :         rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
    1215         [ +  - ]:          6 :         rctrlr.max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
    1216   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
                   +  - ]
    1217   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
                   +  - ]
    1218                 :          6 :         CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
    1219                 :            : 
    1220   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 32;
                   +  - ]
    1221   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
                   +  - ]
    1222                 :          6 :         CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
    1223                 :            : 
    1224   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 8;
                   +  - ]
    1225   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
                   +  - ]
    1226                 :          6 :         CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 8);
    1227                 :            : 
    1228   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
                   +  - ]
    1229   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4;
                   +  - ]
    1230                 :          6 :         CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 1);
    1231                 :            : 
    1232   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
                   +  - ]
    1233   [ +  -  +  -  :          6 :         rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 6;
                   +  - ]
    1234                 :          6 :         CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 2);
    1235                 :          6 : }
    1236                 :            : 
    1237                 :            : static void
    1238                 :          6 : test_nvme_rdma_poll_group_get_stats(void)
    1239                 :            : {
    1240                 :          6 :         int rc = -1;
    1241                 :          6 :         struct spdk_nvme_transport_poll_group_stat *tpointer = NULL;
    1242                 :          6 :         struct nvme_rdma_poll_group tgroup = {};
    1243                 :          6 :         struct ibv_device dev1, dev2 = {};
    1244                 :          6 :         struct ibv_context contexts1, contexts2 = {};
    1245                 :          6 :         struct nvme_rdma_poller *tpoller1 = NULL;
    1246                 :          6 :         struct nvme_rdma_poller *tpoller2 = NULL;
    1247                 :            : 
    1248   [ +  -  +  - ]:          6 :         memcpy(dev1.name, "/dev/test1", sizeof("/dev/test1"));
    1249   [ +  -  +  - ]:          6 :         memcpy(dev2.name, "/dev/test2", sizeof("/dev/test2"));
    1250                 :          6 :         contexts1.device = &dev1;
    1251                 :          6 :         contexts2.device = &dev2;
    1252                 :            : 
    1253                 :            :         /* Initialization */
    1254   [ +  -  +  -  :          6 :         STAILQ_INIT(&tgroup.pollers);
          +  -  +  -  +  
                      - ]
    1255                 :          6 :         tpoller2 = nvme_rdma_poller_create(&tgroup, &contexts1);
    1256   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(tpoller2 != NULL);
    1257         [ +  - ]:          6 :         CU_ASSERT(tgroup.num_pollers == 1);
    1258                 :            : 
    1259                 :          6 :         tpoller1 = nvme_rdma_poller_create(&tgroup, &contexts2);
    1260   [ +  +  #  # ]:          6 :         SPDK_CU_ASSERT_FATAL(tpoller1 != NULL);
    1261         [ +  - ]:          6 :         CU_ASSERT(tgroup.num_pollers == 2);
    1262                 :          6 :         CU_ASSERT(&tgroup.pollers != NULL);
    1263                 :            : 
    1264   [ +  -  +  - ]:          6 :         CU_ASSERT(tpoller1->device == &contexts2);
    1265   [ +  -  +  - ]:          6 :         CU_ASSERT(tpoller2->device == &contexts1);
    1266   [ +  +  +  -  :          6 :         CU_ASSERT(strcmp(tpoller1->device->device->name, "/dev/test2") == 0);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1267   [ +  +  +  -  :          6 :         CU_ASSERT(strcmp(tpoller2->device->device->name, "/dev/test1") == 0);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1268   [ +  -  +  - ]:          6 :         CU_ASSERT(tpoller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
    1269   [ +  -  +  - ]:          6 :         CU_ASSERT(tpoller2->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
    1270   [ +  -  +  - ]:          6 :         CU_ASSERT(tpoller1->required_num_wc == 0);
    1271   [ +  -  +  - ]:          6 :         CU_ASSERT(tpoller2->required_num_wc == 0);
    1272                 :            : 
    1273                 :            :         /* Test1: Invalid stats */
    1274                 :          6 :         rc = nvme_rdma_poll_group_get_stats(NULL, &tpointer);
    1275                 :          6 :         CU_ASSERT(rc == -EINVAL);
    1276                 :            : 
    1277                 :            :         /* Test2: Invalid group pointer */
    1278                 :          6 :         rc = nvme_rdma_poll_group_get_stats(&tgroup.group, NULL);
    1279                 :          6 :         CU_ASSERT(rc == -EINVAL);
    1280                 :            : 
    1281                 :            :         /* Test3: Success member variables should be correct */
    1282   [ +  -  +  -  :          6 :         tpoller1->stats.polls = 111;
                   +  - ]
    1283   [ +  -  +  -  :          6 :         tpoller1->stats.idle_polls = 112;
                   +  - ]
    1284   [ +  -  +  -  :          6 :         tpoller1->stats.completions = 113;
                   +  - ]
    1285   [ +  -  +  -  :          6 :         tpoller1->stats.queued_requests = 114;
                   +  - ]
    1286   [ +  -  +  -  :          6 :         tpoller1->stats.rdma_stats.send.num_submitted_wrs = 121;
          +  -  +  -  +  
                      - ]
    1287   [ +  -  +  -  :          6 :         tpoller1->stats.rdma_stats.send.doorbell_updates = 122;
          +  -  +  -  +  
                      - ]
    1288   [ +  -  +  -  :          6 :         tpoller1->stats.rdma_stats.recv.num_submitted_wrs = 131;
          +  -  +  -  +  
                      - ]
    1289   [ +  -  +  -  :          6 :         tpoller1->stats.rdma_stats.recv.doorbell_updates = 132;
          +  -  +  -  +  
                      - ]
    1290   [ +  -  +  -  :          6 :         tpoller2->stats.polls = 211;
                   +  - ]
    1291   [ +  -  +  -  :          6 :         tpoller2->stats.idle_polls = 212;
                   +  - ]
    1292   [ +  -  +  -  :          6 :         tpoller2->stats.completions = 213;
                   +  - ]
    1293   [ +  -  +  -  :          6 :         tpoller2->stats.queued_requests = 214;
                   +  - ]
    1294   [ +  -  +  -  :          6 :         tpoller2->stats.rdma_stats.send.num_submitted_wrs = 221;
          +  -  +  -  +  
                      - ]
    1295   [ +  -  +  -  :          6 :         tpoller2->stats.rdma_stats.send.doorbell_updates = 222;
          +  -  +  -  +  
                      - ]
    1296   [ +  -  +  -  :          6 :         tpoller2->stats.rdma_stats.recv.num_submitted_wrs = 231;
          +  -  +  -  +  
                      - ]
    1297   [ +  -  +  -  :          6 :         tpoller2->stats.rdma_stats.recv.doorbell_updates = 232;
          +  -  +  -  +  
                      - ]
    1298                 :            : 
    1299                 :          6 :         rc = nvme_rdma_poll_group_get_stats(&tgroup.group, &tpointer);
    1300                 :          6 :         CU_ASSERT(rc == 0);
    1301                 :          6 :         CU_ASSERT(tpointer != NULL);
    1302   [ +  -  +  - ]:          6 :         CU_ASSERT(tpointer->trtype == SPDK_NVME_TRANSPORT_RDMA);
    1303   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.num_devices == tgroup.num_pollers);
          +  -  +  -  +  
                      - ]
    1304   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats != NULL);
             +  -  +  - ]
    1305                 :            : 
    1306   [ +  +  +  -  :          6 :         CU_ASSERT(strcmp(tpointer->rdma.device_stats[0].name, "/dev/test2") == 0);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
    1307   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].polls == 111);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1308   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].idle_polls == 112);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1309   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].completions == 113);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1310   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].queued_requests == 114);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1311   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].total_send_wrs == 121);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1312   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].send_doorbell_updates == 122);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1313   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].total_recv_wrs == 131);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1314   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[0].recv_doorbell_updates == 132);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1315                 :            : 
    1316   [ +  +  +  -  :          6 :         CU_ASSERT(strcmp(tpointer->rdma.device_stats[1].name, "/dev/test1") == 0);
          +  -  +  -  +  
          -  +  -  +  -  
             +  -  +  - ]
    1317   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].polls == 211);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1318   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].idle_polls == 212);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1319   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].completions == 213);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1320   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].queued_requests == 214);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1321   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].total_send_wrs == 221);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1322   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].send_doorbell_updates == 222);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1323   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].total_recv_wrs == 231);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1324   [ +  -  +  -  :          6 :         CU_ASSERT(tpointer->rdma.device_stats[1].recv_doorbell_updates == 232);
          +  -  +  -  +  
             -  +  -  +  
                      - ]
    1325                 :            : 
    1326                 :          6 :         nvme_rdma_poll_group_free_stats(&tgroup.group, tpointer);
    1327                 :          6 :         nvme_rdma_poll_group_free_pollers(&tgroup);
    1328                 :          6 : }
    1329                 :            : 
/* Exercise nvme_rdma_qpair_set_poller() error and success paths: poll group
 * creation, poller/CQ allocation, CQ reuse, and CQ resizing. The ibv_create_cq
 * and ibv_resize_cq mocks drive the individual cases. */
static void
test_nvme_rdma_qpair_set_poller(void)
{
	int rc = -1;
	struct nvme_rdma_poll_group *group;
	struct spdk_nvme_transport_poll_group *tgroup;
	struct nvme_rdma_poller *poller;
	struct nvme_rdma_qpair rqpair = {};
	struct rdma_cm_id cm_id = {};

	/* Case1: Test function nvme_rdma_poll_group_create */
	/* Test1: Function nvme_rdma_poll_group_create success */
	tgroup = nvme_rdma_poll_group_create();
	SPDK_CU_ASSERT_FATAL(tgroup != NULL);

	group = nvme_rdma_poll_group(tgroup);
	CU_ASSERT(group != NULL);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	/* Case2: Test function nvme_rdma_qpair_set_poller */
	rqpair.qpair.poll_group = tgroup;
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rqpair.cm_id = &cm_id;

	/* Test1: Function ibv_create_cq failed */
	cm_id.verbs = (void *)0xFEEDBEEF;
	MOCK_SET(ibv_create_cq, NULL);

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(rqpair.cq == NULL);
	/* the poller created for this device must be torn down on failure */
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	MOCK_CLEAR(ibv_create_cq);

	/* Test2: Unable to find a cq for qpair on poll group (no verbs context) */
	cm_id.verbs = NULL;

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(rqpair.cq == NULL);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	/* Test3: Match cq success, current_num_wc is enough */
	MOCK_SET(ibv_create_cq, (struct ibv_cq *)0xFEEDBEEF);

	cm_id.verbs = (void *)0xFEEDBEEF;
	rqpair.num_entries = 0;

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.cq == (void *)0xFEEDBEEF);

	poller = STAILQ_FIRST(&group->pollers);
	SPDK_CU_ASSERT_FATAL(poller != NULL);
	CU_ASSERT(STAILQ_NEXT(poller, link) == NULL);
	CU_ASSERT(poller->device == (struct ibv_context *)0xFEEDBEEF);
	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
	CU_ASSERT(poller->required_num_wc == 0);
	CU_ASSERT(rqpair.poller == poller);

	/* mark the qpair disconnected so releasing the poller destroys it */
	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;

	nvme_rdma_poll_group_put_poller(group, rqpair.poller);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	rqpair.qpair.poll_group_tailq_head = &tgroup->connected_qpairs;

	/* Test4: Match cq success, function ibv_resize_cq failed */
	rqpair.cq = NULL;
	rqpair.num_entries = DEFAULT_NVME_RDMA_CQ_SIZE - 1;
	MOCK_SET(ibv_resize_cq, -1);

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == -EPROTO);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	/* Test5: Current_num_wc is not enough, resize success */
	MOCK_SET(ibv_resize_cq, 0);

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == 0);

	poller = STAILQ_FIRST(&group->pollers);
	SPDK_CU_ASSERT_FATAL(poller != NULL);
	/* current_num_wc doubled; required_num_wc tracks 2 * num_entries */
	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE * 2);
	CU_ASSERT(poller->required_num_wc == (DEFAULT_NVME_RDMA_CQ_SIZE - 1) * 2);
	CU_ASSERT(rqpair.cq == poller->cq);
	CU_ASSERT(rqpair.poller == poller);

	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;

	nvme_rdma_poll_group_put_poller(group, rqpair.poller);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	rc = nvme_rdma_poll_group_destroy(tgroup);
	CU_ASSERT(rc == 0);
}
    1428                 :            : 
    1429                 :            : int
    1430                 :          6 : main(int argc, char **argv)
    1431                 :            : {
    1432                 :          6 :         CU_pSuite       suite = NULL;
    1433                 :          1 :         unsigned int    num_failures;
    1434                 :            : 
    1435                 :          6 :         CU_initialize_registry();
    1436                 :            : 
    1437                 :          6 :         suite = CU_add_suite("nvme_rdma", NULL, NULL);
    1438                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
    1439                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
    1440                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
    1441                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
    1442                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_create_reqs);
    1443                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_create_rsps);
    1444                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
    1445                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
    1446                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
    1447                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_construct);
    1448                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_req_put_and_get);
    1449                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_req_init);
    1450                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_validate_cm_event);
    1451                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
    1452                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
    1453                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_memory_domain);
    1454                 :          6 :         CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domains);
    1455                 :          6 :         CU_ADD_TEST(suite, test_rdma_get_memory_translation);
    1456                 :          6 :         CU_ADD_TEST(suite, test_get_rdma_qpair_from_wc);
    1457                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_get_max_sges);
    1458                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_stats);
    1459                 :          6 :         CU_ADD_TEST(suite, test_nvme_rdma_qpair_set_poller);
    1460                 :            : 
    1461                 :          6 :         num_failures = spdk_ut_run_tests(argc, argv, NULL);
    1462                 :          6 :         CU_cleanup_registry();
    1463                 :          7 :         return num_failures;
    1464                 :          1 : }

Generated by: LCOV version 1.14