LCOV - code coverage report
Current view: top level - lib/nvme - nvme_ns_cmd.c (source / functions)
Test: ut_cov_unit.info
Date: 2024-07-15 03:40:05

                Hit    Total    Coverage
Lines:          423      586      72.2 %
Functions:       41       46      89.1 %

          Line data    Source code
       1             : /*   SPDX-License-Identifier: BSD-3-Clause
       2             :  *   Copyright (C) 2015 Intel Corporation.
       3             :  *   All rights reserved.
       4             :  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
       5             :  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
       6             :  *   Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
       7             :  */
       8             : 
       9             : #include "nvme_internal.h"
      10             : 
      11             : static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
      12             :                 struct spdk_nvme_qpair *qpair,
      13             :                 const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
      14             :                 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
      15             :                 void *cb_arg, uint32_t opc, uint32_t io_flags,
      16             :                 uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
      17             :                 void *accel_sequence, int *rc);
      18             : 
      19             : static bool
      20           1 : nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
      21             :                              uint32_t sectors_per_stripe, uint32_t qdepth)
      22             : {
      23           1 :         uint32_t child_per_io = UINT32_MAX;
      24             : 
       25             :         /* After a namespace is destroyed (e.g. on hotplug), all the fields associated
       26             :          * with the namespace are cleared to zero. In that case this function returns
       27             :          * true, and -EINVAL is returned to the caller.
       28             :          */
      29           1 :         if (sectors_per_stripe > 0) {
      30           0 :                 child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
      31           1 :         } else if (sectors_per_max_io > 0) {
      32           1 :                 child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
      33             :         }
      34             : 
       35           1 :         SPDK_DEBUGLOG(nvme, "checking maximum I/O length %u\n", child_per_io);
      36             : 
      37           1 :         return child_per_io >= qdepth;
      38             : }
      39             : 
      40             : static inline int
      41           2 : nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
      42             :                        uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
      43             : {
      44           2 :         assert(rc);
      45           3 :         if (rc == -ENOMEM &&
      46           1 :             nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
      47           1 :                 return -EINVAL;
      48             :         }
      49           1 :         return rc;
      50             : }
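                      :
                      : /* Illustrative example (not part of the driver source): a request that fails
                      :  * with -ENOMEM because it would split into more child requests than the qpair
                      :  * has request objects (qdepth) can never succeed on retry, so the error is
                      :  * mapped to -EINVAL. The same check catches a hot-removed namespace: with
                      :  * sectors_per_max_io and sectors_per_stripe both zeroed, child_per_io stays
                      :  * UINT32_MAX, which is always >= qdepth.
                      :  */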
      51             : 
      52             : static inline bool
      53         220 : _nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
      54             : {
      55         255 :         return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
      56          35 :                (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
      57         261 :                (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
      58           6 :                (ns->md_size == 8);
      59             : }
      60             : 
      61             : static inline uint32_t
      62         120 : _nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
      63             : {
      64         120 :         return _nvme_md_excluded_from_xfer(ns, io_flags) ?
      65         120 :                ns->sector_size : ns->extended_lba_size;
      66             : }
      67             : 
      68             : static inline uint32_t
      69         100 : _nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
      70             : {
      71         100 :         return _nvme_md_excluded_from_xfer(ns, io_flags) ?
      72         100 :                ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
      73             : }
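                      :
                      : /* Illustrative example (not part of the driver source): for a namespace
                      :  * formatted with 512-byte sectors plus 8 bytes of protection information,
                      :  * setting SPDK_NVME_IO_FLAGS_PRACT makes the controller insert and strip the
                      :  * PI itself, so the host buffer is sized in plain 512-byte sectors and
                      :  * sectors_per_max_io_no_md applies; without PRACT the host transfers 520-byte
                      :  * extended LBAs.
                      :  */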
      74             : 
      75             : static struct nvme_request *
      76          56 : _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
      77             :                         const struct nvme_payload *payload,
      78             :                         uint32_t payload_offset, uint32_t md_offset,
      79             :                         uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
      80             :                         uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
      81             :                         struct nvme_request *parent, bool check_sgl, int *rc)
      82             : {
      83             :         struct nvme_request     *child;
      84             : 
      85          56 :         child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
      86             :                                 cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
      87          56 :         if (child == NULL) {
      88           1 :                 nvme_request_free_children(parent);
      89           1 :                 nvme_free_request(parent);
      90           1 :                 return NULL;
      91             :         }
      92             : 
      93          55 :         nvme_request_add_child(parent, child);
      94          55 :         return child;
      95             : }
      96             : 
      97             : static struct nvme_request *
      98          14 : _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
      99             :                            struct spdk_nvme_qpair *qpair,
     100             :                            const struct nvme_payload *payload,
     101             :                            uint32_t payload_offset, uint32_t md_offset,
     102             :                            uint64_t lba, uint32_t lba_count,
     103             :                            spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     104             :                            uint32_t io_flags, struct nvme_request *req,
     105             :                            uint32_t sectors_per_max_io, uint32_t sector_mask,
     106             :                            uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
     107             :                            void *accel_sequence, int *rc)
     108             : {
     109          14 :         uint32_t                sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
     110          14 :         uint32_t                remaining_lba_count = lba_count;
     111             :         struct nvme_request     *child;
     112             : 
     113          14 :         if (spdk_unlikely(accel_sequence != NULL)) {
     114           0 :                 SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
     115           0 :                 *rc = -EINVAL;
     116           0 :                 return NULL;
     117             :         }
     118             : 
     119          69 :         while (remaining_lba_count > 0) {
     120          56 :                 lba_count = sectors_per_max_io - (lba & sector_mask);
     121          56 :                 lba_count = spdk_min(remaining_lba_count, lba_count);
     122             : 
     123          56 :                 child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
     124             :                                                 lba, lba_count, cb_fn, cb_arg, opc,
     125             :                                                 io_flags, apptag_mask, apptag, cdw13, req, true, rc);
     126          56 :                 if (child == NULL) {
     127           1 :                         return NULL;
     128             :                 }
     129             : 
     130          55 :                 remaining_lba_count -= lba_count;
     131          55 :                 lba += lba_count;
     132          55 :                 payload_offset += lba_count * sector_size;
     133          55 :                 md_offset += lba_count * ns->md_size;
     134             :         }
     135             : 
     136          13 :         return req;
     137             : }
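                      :
                      : /* Worked example (illustrative): with sectors_per_max_io == 128,
                      :  * sector_mask == 0, lba == 10 and lba_count == 300, the loop creates three
                      :  * children: (lba 10, 128 LBAs), (lba 138, 128 LBAs), (lba 266, 44 LBAs).
                      :  * When splitting on stripe boundaries instead, sector_mask ==
                      :  * sectors_per_stripe - 1, so the first child is shortened to
                      :  * 128 - (10 & 127) == 118 LBAs and every later child starts stripe-aligned.
                      :  */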
     138             : 
     139             : static inline bool
     140         141 : _is_io_flags_valid(uint32_t io_flags)
     141             : {
     142         141 :         if (spdk_unlikely(io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK)) {
     143             :                 /* Invalid io_flags */
     144           3 :                 SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
     145           3 :                 return false;
     146             :         }
     147             : 
     148         138 :         return true;
     149             : }
     150             : 
     151             : static inline bool
     152           2 : _is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
     153             : {
      154             :         /* An accel sequence can only be executed if the controller supports accel and the
      155             :          * qpair is part of a poll group. */
     156           2 :         if (spdk_likely(seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
     157             :                                         qpair->poll_group != NULL))) {
     158           2 :                 return true;
     159             :         }
     160             : 
     161           0 :         return false;
     162             : }
     163             : 
     164             : static void
     165          85 : _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
     166             :                            uint32_t opc, uint64_t lba, uint32_t lba_count,
     167             :                            uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
     168             :                            uint32_t cdw13)
     169             : {
     170             :         struct spdk_nvme_cmd    *cmd;
     171             : 
     172          85 :         assert(_is_io_flags_valid(io_flags));
     173             : 
     174          85 :         cmd = &req->cmd;
     175          85 :         cmd->opc = opc;
     176          85 :         cmd->nsid = ns->id;
     177             : 
     178          85 :         *(uint64_t *)&cmd->cdw10 = lba;
     179             : 
     180          85 :         if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
     181          13 :                 switch (ns->pi_type) {
     182           1 :                 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
     183             :                 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
     184           1 :                         cmd->cdw14 = (uint32_t)lba;
     185           1 :                         break;
     186             :                 }
     187          72 :         }
     188             : 
     189          85 :         cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
     190             : 
     191          85 :         cmd->cdw12 = lba_count - 1;
     192          85 :         cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
     193             : 
     194          85 :         cmd->cdw13 = cdw13;
     195             : 
     196          85 :         cmd->cdw15 = apptag_mask;
     197          85 :         cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
     198          85 : }
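                      :
                      : /* Worked example (illustrative): for lba == 0x1000, lba_count == 8,
                      :  * apptag_mask == 0xfff0 and apptag == 0x1234, the command is built as:
                      :  *   cdw10/cdw11 = 0x1000     (64-bit starting LBA)
                      :  *   cdw12      = 7           (zero-based NLB, OR'd with any CDW12 io_flags)
                      :  *   cdw15      = 0xfff01234  (apptag_mask << 16 | apptag)
                      :  */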
     199             : 
     200             : static struct nvme_request *
     201          18 : _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
     202             :                                struct spdk_nvme_qpair *qpair,
     203             :                                const struct nvme_payload *payload,
     204             :                                uint32_t payload_offset, uint32_t md_offset,
     205             :                                uint64_t lba, uint32_t lba_count,
     206             :                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     207             :                                uint32_t io_flags, struct nvme_request *req,
     208             :                                uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
     209             :                                void *accel_sequence, int *rc)
     210             : {
     211          18 :         spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
     212          18 :         spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
     213          18 :         void *sgl_cb_arg = req->payload.contig_or_cb_arg;
     214             :         bool start_valid, end_valid, last_sge, child_equals_parent;
     215          18 :         uint64_t child_lba = lba;
     216          18 :         uint32_t req_current_length = 0;
     217          18 :         uint32_t child_length = 0;
     218          18 :         uint32_t sge_length;
     219          18 :         uint32_t page_size = qpair->ctrlr->page_size;
     220          18 :         uintptr_t address;
     221             : 
     222          18 :         reset_sgl_fn(sgl_cb_arg, payload_offset);
     223          18 :         next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
     224          36 :         while (req_current_length < req->payload_size) {
     225             : 
     226          19 :                 if (sge_length == 0) {
     227           0 :                         continue;
     228          19 :                 } else if (req_current_length + sge_length > req->payload_size) {
     229           5 :                         sge_length = req->payload_size - req_current_length;
     230             :                 }
     231             : 
     232             :                 /*
     233             :                  * The start of the SGE is invalid if the start address is not page aligned,
     234             :                  *  unless it is the first SGE in the child request.
     235             :                  */
     236          19 :                 start_valid = child_length == 0 || _is_page_aligned(address, page_size);
     237             : 
     238             :                 /* Boolean for whether this is the last SGE in the parent request. */
     239          19 :                 last_sge = (req_current_length + sge_length == req->payload_size);
     240             : 
     241             :                 /*
     242             :                  * The end of the SGE is invalid if the end address is not page aligned,
     243             :                  *  unless it is the last SGE in the parent request.
     244             :                  */
     245          19 :                 end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);
     246             : 
     247             :                 /*
     248             :                  * This child request equals the parent request, meaning that no splitting
     249             :                  *  was required for the parent request (the one passed into this function).
     250             :                  *  In this case, we do not create a child request at all - we just send
     251             :                  *  the original request as a single request at the end of this function.
     252             :                  */
     253          19 :                 child_equals_parent = (child_length + sge_length == req->payload_size);
     254             : 
     255          19 :                 if (start_valid) {
     256             :                         /*
     257             :                          * The start of the SGE is valid, so advance the length parameters,
     258             :                          *  to include this SGE with previous SGEs for this child request
     259             :                          *  (if any).  If it is not valid, we do not advance the length
     260             :                          *  parameters nor get the next SGE, because we must send what has
     261             :                          *  been collected before this SGE as a child request.
     262             :                          */
     263          19 :                         child_length += sge_length;
     264          19 :                         req_current_length += sge_length;
     265          19 :                         if (req_current_length < req->payload_size) {
     266           2 :                                 next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
     267             :                                 /*
     268             :                                  * If the next SGE is not page aligned, we will need to create a
     269             :                                  *  child request for what we have so far, and then start a new
     270             :                                  *  child request for the next SGE.
     271             :                                  */
     272           2 :                                 start_valid = _is_page_aligned(address, page_size);
     273             :                         }
     274             :                 }
     275             : 
     276          19 :                 if (start_valid && end_valid && !last_sge) {
     277           1 :                         continue;
     278             :                 }
     279             : 
     280             :                 /*
     281             :                  * We need to create a split here.  Send what we have accumulated so far as a child
     282             :                  *  request.  Checking if child_equals_parent allows us to *not* create a child request
     283             :                  *  when no splitting is required - in that case we will fall-through and just create
     284             :                  *  a single request with no children for the entire I/O.
     285             :                  */
     286          18 :                 if (!child_equals_parent) {
     287             :                         struct nvme_request *child;
     288             :                         uint32_t child_lba_count;
     289             : 
     290           1 :                         if ((child_length % ns->extended_lba_size) != 0) {
     291           1 :                                 SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
     292             :                                             child_length, ns->extended_lba_size);
     293           1 :                                 *rc = -EINVAL;
     294           1 :                                 return NULL;
     295             :                         }
     296           0 :                         if (spdk_unlikely(accel_sequence != NULL)) {
     297           0 :                                 SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
     298           0 :                                 *rc = -EINVAL;
     299           0 :                                 return NULL;
     300             :                         }
     301             : 
     302           0 :                         child_lba_count = child_length / ns->extended_lba_size;
     303             :                         /*
      304             :                          * Note the check_sgl parameter is set to "false" - this tells the recursive
     305             :                          *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
     306             :                          *  since we have already verified it here.
     307             :                          */
     308           0 :                         child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
     309             :                                                         child_lba, child_lba_count,
     310             :                                                         cb_fn, cb_arg, opc, io_flags,
     311             :                                                         apptag_mask, apptag, cdw13, req, false, rc);
     312           0 :                         if (child == NULL) {
     313           0 :                                 return NULL;
     314             :                         }
     315           0 :                         payload_offset += child_length;
     316           0 :                         md_offset += child_lba_count * ns->md_size;
     317           0 :                         child_lba += child_lba_count;
     318           0 :                         child_length = 0;
     319             :                 }
     320             :         }
     321             : 
     322          17 :         if (child_length == req->payload_size) {
      323             :                 /* No splitting was required, so set up the whole payload as one request. */
     324          17 :                 _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
     325             :         }
     326             : 
     327          17 :         return req;
     328             : }
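                      :
                      : /* Alignment example (illustrative, 4 KiB pages, 512-byte LBAs): an SGE may
                      :  * start unaligned only if it opens a child request, and may end unaligned
                      :  * only if it is the last SGE of the parent. Two 2048-byte SGEs that each
                      :  * start on a page boundary therefore split: the first ends mid-page and is
                      :  * not last, so it is flushed as a 4-LBA child, the second becomes another
                      :  * 4-LBA child, and the parent is submitted purely as their container.
                      :  */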
     329             : 
     330             : static struct nvme_request *
     331           0 : _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
     332             :                                struct spdk_nvme_qpair *qpair,
     333             :                                const struct nvme_payload *payload,
     334             :                                uint32_t payload_offset, uint32_t md_offset,
     335             :                                uint64_t lba, uint32_t lba_count,
     336             :                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     337             :                                uint32_t io_flags, struct nvme_request *req,
     338             :                                uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
     339             :                                void *accel_sequence, int *rc)
     340             : {
     341           0 :         spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
     342           0 :         spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
     343           0 :         void *sgl_cb_arg = req->payload.contig_or_cb_arg;
     344           0 :         uint64_t child_lba = lba;
     345           0 :         uint32_t req_current_length = 0;
     346           0 :         uint32_t child_length = 0;
     347           0 :         uint32_t sge_length;
     348             :         uint16_t max_sges, num_sges;
     349           0 :         uintptr_t address;
     350             : 
     351           0 :         max_sges = ns->ctrlr->max_sges;
     352             : 
     353           0 :         reset_sgl_fn(sgl_cb_arg, payload_offset);
     354           0 :         num_sges = 0;
     355             : 
     356           0 :         while (req_current_length < req->payload_size) {
     357           0 :                 next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
     358             : 
     359           0 :                 if (req_current_length + sge_length > req->payload_size) {
     360           0 :                         sge_length = req->payload_size - req_current_length;
     361             :                 }
     362             : 
     363           0 :                 child_length += sge_length;
     364           0 :                 req_current_length += sge_length;
     365           0 :                 num_sges++;
     366             : 
     367           0 :                 if (num_sges < max_sges && req_current_length < req->payload_size) {
     368           0 :                         continue;
     369             :                 }
     370             : 
     371             :                 /*
     372             :                  * We need to create a split here.  Send what we have accumulated so far as a child
     373             :                  *  request.  Checking if the child equals the full payload allows us to *not*
     374             :                  *  create a child request when no splitting is required - in that case we will
     375             :                  *  fall-through and just create a single request with no children for the entire I/O.
     376             :                  */
     377           0 :                 if (child_length != req->payload_size) {
     378             :                         struct nvme_request *child;
     379             :                         uint32_t child_lba_count;
     380             : 
     381           0 :                         if ((child_length % ns->extended_lba_size) != 0) {
     382           0 :                                 SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
     383             :                                             child_length, ns->extended_lba_size);
     384           0 :                                 *rc = -EINVAL;
     385           0 :                                 return NULL;
     386             :                         }
     387           0 :                         if (spdk_unlikely(accel_sequence != NULL)) {
     388           0 :                                 SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
     389           0 :                                 *rc = -EINVAL;
     390           0 :                                 return NULL;
     391             :                         }
     392             : 
     393           0 :                         child_lba_count = child_length / ns->extended_lba_size;
     394             :                         /*
      395             :                          * Note the check_sgl parameter is set to "false" - this tells the recursive
     396             :                          *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
     397             :                          *  since we have already verified it here.
     398             :                          */
     399           0 :                         child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
     400             :                                                         child_lba, child_lba_count,
     401             :                                                         cb_fn, cb_arg, opc, io_flags,
     402             :                                                         apptag_mask, apptag, cdw13, req, false, rc);
     403           0 :                         if (child == NULL) {
     404           0 :                                 return NULL;
     405             :                         }
     406           0 :                         payload_offset += child_length;
     407           0 :                         md_offset += child_lba_count * ns->md_size;
     408           0 :                         child_lba += child_lba_count;
     409           0 :                         child_length = 0;
     410           0 :                         num_sges = 0;
     411             :                 }
     412             :         }
     413             : 
     414           0 :         if (child_length == req->payload_size) {
      415             :                 /* No splitting was required, so set up the whole payload as one request. */
     416           0 :                 _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
     417             :         }
     418             : 
     419           0 :         return req;
     420             : }
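                      :
                      : /* Worked example (illustrative): with max_sges == 16, a payload described by
                      :  * 40 SGEs is sent as three children carrying 16, 16 and 8 SGEs. Each child's
                      :  * byte length must still be a multiple of the extended LBA size, otherwise
                      :  * the split fails with -EINVAL.
                      :  */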
     421             : 
     422             : static inline struct nvme_request *
     423         100 : _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     424             :                 const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
     425             :                 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
     426             :                 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
     427             :                 void *accel_sequence, int *rc)
     428             : {
     429             :         struct nvme_request     *req;
     430         100 :         uint32_t                sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
     431         100 :         uint32_t                sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
     432         100 :         uint32_t                sectors_per_stripe = ns->sectors_per_stripe;
     433             : 
     434         100 :         assert(rc != NULL);
     435         100 :         assert(*rc == 0);
     436             : 
     437         100 :         req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
     438             :                                     cb_fn, cb_arg);
     439         100 :         if (req == NULL) {
     440           1 :                 *rc = -ENOMEM;
     441           1 :                 return NULL;
     442             :         }
     443             : 
     444          99 :         req->payload_offset = payload_offset;
     445          99 :         req->md_offset = md_offset;
     446          99 :         req->accel_sequence = accel_sequence;
     447             : 
     448             :         /* Zone append commands cannot be split. */
     449          99 :         if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
     450           3 :                 assert(ns->csi == SPDK_NVME_CSI_ZNS);
     451             :                 /*
      452             :                  * Because driver-assisted striping is disabled for zone append commands,
      453             :                  * _nvme_ns_cmd_rw() should never cause a valid request to be split.
      454             :                  * If a request is nevertheless split, error handling is done in the caller functions.
     455             :                  */
     456           3 :                 sectors_per_stripe = 0;
     457             :         }
     458             : 
     459             :         /*
     460             :          * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
     461             :          * If this controller defines a stripe boundary and this I/O spans a stripe
     462             :          *  boundary, split the request into multiple requests and submit each
     463             :          *  separately to hardware.
     464             :          */
     465          99 :         if (sectors_per_stripe > 0 &&
     466           7 :             (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
     467           1 :                 return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
     468             :                                                   cb_fn,
     469             :                                                   cb_arg, opc,
     470             :                                                   io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
     471             :                                                   apptag_mask, apptag, cdw13,  accel_sequence, rc);
     472          98 :         } else if (lba_count > sectors_per_max_io) {
     473          13 :                 return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
     474             :                                                   cb_fn,
     475             :                                                   cb_arg, opc,
     476             :                                                   io_flags, req, sectors_per_max_io, 0, apptag_mask,
     477             :                                                   apptag, cdw13, accel_sequence, rc);
     478          85 :         } else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
     479          18 :                 if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
     480           0 :                         return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
     481             :                                                               lba, lba_count, cb_fn, cb_arg, opc, io_flags,
     482             :                                                               req, apptag_mask, apptag, cdw13,
     483             :                                                               accel_sequence, rc);
     484             :                 } else {
     485          18 :                         return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
     486             :                                                               lba, lba_count, cb_fn, cb_arg, opc, io_flags,
     487             :                                                               req, apptag_mask, apptag, cdw13,
     488             :                                                               accel_sequence, rc);
     489             :                 }
     490             :         }
     491             : 
     492          67 :         _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
     493          67 :         return req;
     494             : }
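                      :
                      : /* Stripe-boundary check, worked example (illustrative): with
                      :  * sectors_per_stripe == 128, an I/O at lba == 120 for 16 LBAs gives
                      :  * (120 & 127) + 16 == 136 > 128, so it crosses a stripe and is split;
                      :  * an I/O at lba == 0 for 128 LBAs fits exactly and is left intact.
                      :  */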
     495             : 
     496             : int
     497           1 : spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     498             :                          uint64_t lba,
     499             :                          uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     500             :                          uint32_t io_flags)
     501             : {
     502             :         struct nvme_request *req;
     503           1 :         struct nvme_payload payload;
     504           1 :         int rc = 0;
     505             : 
     506           1 :         if (!_is_io_flags_valid(io_flags)) {
     507           0 :                 return -EINVAL;
     508             :         }
     509             : 
     510           1 :         payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
     511             : 
     512           1 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     513             :                               SPDK_NVME_OPC_COMPARE,
     514             :                               io_flags, 0,
     515             :                               0, 0, false, NULL, &rc);
     516           1 :         if (req != NULL) {
     517           1 :                 return nvme_qpair_submit_request(qpair, req);
     518             :         } else {
     519           0 :                 return nvme_ns_map_failure_rc(lba_count,
     520             :                                               ns->sectors_per_max_io,
     521             :                                               ns->sectors_per_stripe,
     522           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     523             :                                               rc);
     524             :         }
     525             : }
     526             : 
     527             : int
     528           6 : spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     529             :                                  void *buffer,
     530             :                                  void *metadata,
     531             :                                  uint64_t lba,
     532             :                                  uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     533             :                                  uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
     534             : {
     535             :         struct nvme_request *req;
     536           6 :         struct nvme_payload payload;
     537           6 :         int rc = 0;
     538             : 
     539           6 :         if (!_is_io_flags_valid(io_flags)) {
     540           0 :                 return -EINVAL;
     541             :         }
     542             : 
     543           6 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
     544             : 
     545           6 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     546             :                               SPDK_NVME_OPC_COMPARE,
     547             :                               io_flags,
     548             :                               apptag_mask, apptag, 0, false, NULL, &rc);
     549           6 :         if (req != NULL) {
     550           6 :                 return nvme_qpair_submit_request(qpair, req);
     551             :         } else {
     552           0 :                 return nvme_ns_map_failure_rc(lba_count,
     553             :                                               ns->sectors_per_max_io,
     554             :                                               ns->sectors_per_stripe,
     555           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     556             :                                               rc);
     557             :         }
     558             : }
     559             : 
     560             : int
     561           2 : spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     562             :                           uint64_t lba, uint32_t lba_count,
     563             :                           spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     564             :                           spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     565             :                           spdk_nvme_req_next_sge_cb next_sge_fn)
     566             : {
     567             :         struct nvme_request *req;
     568           2 :         struct nvme_payload payload;
     569           2 :         int rc = 0;
     570             : 
     571           2 :         if (!_is_io_flags_valid(io_flags)) {
     572           0 :                 return -EINVAL;
     573             :         }
     574             : 
     575           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     576           1 :                 return -EINVAL;
     577             :         }
     578             : 
     579           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
     580             : 
     581           1 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     582             :                               SPDK_NVME_OPC_COMPARE,
     583             :                               io_flags, 0, 0, 0, true, NULL, &rc);
     584           1 :         if (req != NULL) {
     585           1 :                 return nvme_qpair_submit_request(qpair, req);
     586             :         } else {
     587           0 :                 return nvme_ns_map_failure_rc(lba_count,
     588             :                                               ns->sectors_per_max_io,
     589             :                                               ns->sectors_per_stripe,
     590           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     591             :                                               rc);
     592             :         }
     593             : }
     594             : 
     595             : int
     596           6 : spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     597             :                                   uint64_t lba, uint32_t lba_count,
     598             :                                   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     599             :                                   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     600             :                                   spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
     601             :                                   uint16_t apptag_mask, uint16_t apptag)
     602             : {
     603             :         struct nvme_request *req;
     604           6 :         struct nvme_payload payload;
     605           6 :         int rc = 0;
     606             : 
     607           6 :         if (!_is_io_flags_valid(io_flags)) {
     608           0 :                 return -EINVAL;
     609             :         }
     610             : 
     611           6 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     612           0 :                 return -EINVAL;
     613             :         }
     614             : 
     615           6 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
     616             : 
     617           6 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
     618             :                               SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
     619             :                               NULL, &rc);
     620           6 :         if (req != NULL) {
     621           6 :                 return nvme_qpair_submit_request(qpair, req);
     622             :         } else {
     623           0 :                 return nvme_ns_map_failure_rc(lba_count,
     624             :                                               ns->sectors_per_max_io,
     625             :                                               ns->sectors_per_stripe,
     626           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     627             :                                               rc);
     628             :         }
     629             : }
     630             : 
     631             : int
     632          10 : spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     633             :                       uint64_t lba,
     634             :                       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     635             :                       uint32_t io_flags)
     636             : {
     637             :         struct nvme_request *req;
     638          10 :         struct nvme_payload payload;
     639          10 :         int rc = 0;
     640             : 
     641          10 :         if (!_is_io_flags_valid(io_flags)) {
     642           0 :                 return -EINVAL;
     643             :         }
     644             : 
     645          10 :         payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
     646             : 
     647          10 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     648             :                               io_flags, 0,
     649             :                               0, 0, false, NULL, &rc);
     650          10 :         if (req != NULL) {
     651           9 :                 return nvme_qpair_submit_request(qpair, req);
     652             :         } else {
     653           1 :                 return nvme_ns_map_failure_rc(lba_count,
     654             :                                               ns->sectors_per_max_io,
     655             :                                               ns->sectors_per_stripe,
     656           1 :                                               qpair->ctrlr->opts.io_queue_requests,
     657             :                                               rc);
     658             :         }
     659             : }
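                      :
                      : /* Usage sketch (illustrative only, compiled out): shows the contiguous-buffer
                      :  * read path above together with completion polling. It assumes ns and qpair
                      :  * were obtained during probe/attach and that buf is DMA-able (e.g. from
                      :  * spdk_zmalloc()); all names here are placeholders.
                      :  */
                      : #if 0
                      : static void
                      : read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
                      : {
                      :         bool *done = cb_arg;
                      :
                      :         if (spdk_nvme_cpl_is_error(cpl)) {
                      :                 SPDK_ERRLOG("read failed: sct=%d sc=%d\n",
                      :                             cpl->status.sct, cpl->status.sc);
                      :         }
                      :         *done = true;
                      : }
                      :
                      : static int
                      : read_first_block(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buf)
                      : {
                      :         bool done = false;
                      :         int rc;
                      :
                      :         rc = spdk_nvme_ns_cmd_read(ns, qpair, buf, 0 /* lba */, 1 /* lba_count */,
                      :                                    read_done, &done, 0 /* io_flags */);
                      :         if (rc != 0) {
                      :                 return rc;
                      :         }
                      :         while (!done) {
                      :                 spdk_nvme_qpair_process_completions(qpair, 0);
                      :         }
                      :         return 0;
                      : }
                      : #endif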
     660             : 
     661             : int
     662           1 : spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     663             :                               void *metadata,
     664             :                               uint64_t lba,
     665             :                               uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     666             :                               uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
     667             : {
     668             :         struct nvme_request *req;
     669           1 :         struct nvme_payload payload;
     670           1 :         int rc = 0;
     671             : 
     672           1 :         if (!_is_io_flags_valid(io_flags)) {
     673           0 :                 return -EINVAL;
     674             :         }
     675             : 
     676           1 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
     677             : 
     678           1 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     679             :                               io_flags,
     680             :                               apptag_mask, apptag, 0, false, NULL, &rc);
     681           1 :         if (req != NULL) {
     682           1 :                 return nvme_qpair_submit_request(qpair, req);
     683             :         } else {
     684           0 :                 return nvme_ns_map_failure_rc(lba_count,
     685             :                                               ns->sectors_per_max_io,
     686             :                                               ns->sectors_per_stripe,
     687           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     688             :                                               rc);
     689             :         }
     690             : }
     691             : 
     692             : static int
     693           0 : nvme_ns_cmd_rw_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     694             :                    uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     695             :                    struct spdk_nvme_ns_cmd_ext_io_opts *opts, enum spdk_nvme_nvm_opcode opc)
     696             : {
     697             :         struct nvme_request *req;
     698           0 :         struct nvme_payload payload;
     699             :         void *seq;
     700           0 :         int rc = 0;
     701             : 
     702           0 :         assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
     703           0 :         assert(opts);
     704             : 
     705           0 :         payload = NVME_PAYLOAD_CONTIG(buffer, opts->metadata);
     706             : 
     707           0 :         if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
     708           0 :                 return -EINVAL;
     709             :         }
     710             : 
     711           0 :         seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
     712           0 :         if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
     713           0 :                 return -EINVAL;
     714             :         }
     715             : 
     716           0 :         payload.opts = opts;
     717             : 
     718           0 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
     719           0 :                               opts->apptag_mask, opts->apptag, 0, false, seq, &rc);
     720           0 :         if (spdk_unlikely(req == NULL)) {
     721           0 :                 return nvme_ns_map_failure_rc(lba_count,
     722             :                                               ns->sectors_per_max_io,
     723             :                                               ns->sectors_per_stripe,
     724           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     725             :                                               rc);
     726             :         }
     727             : 
     728           0 :         return nvme_qpair_submit_request(qpair, req);
     729             : }
     730             : 
     731             : int
     732           0 : spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
     733             :                           uint64_t lba,
     734             :                           uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     735             :                           struct spdk_nvme_ns_cmd_ext_io_opts *opts)
     736             : {
     737           0 :         return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
     738             :                                   SPDK_NVME_OPC_READ);
     739             : }
     740             : 
     741             : int
     742           2 : spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     743             :                        uint64_t lba, uint32_t lba_count,
     744             :                        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     745             :                        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     746             :                        spdk_nvme_req_next_sge_cb next_sge_fn)
     747             : {
     748             :         struct nvme_request *req;
     749           2 :         struct nvme_payload payload;
     750           2 :         int rc = 0;
     751             : 
     752           2 :         if (!_is_io_flags_valid(io_flags)) {
     753           0 :                 return -EINVAL;
     754             :         }
     755             : 
     756           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     757           1 :                 return -EINVAL;
     758             :         }
     759             : 
     760           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
     761             : 
     762           1 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     763             :                               io_flags, 0, 0, 0, true, NULL, &rc);
     764           1 :         if (req != NULL) {
     765           1 :                 return nvme_qpair_submit_request(qpair, req);
     766             :         } else {
     767           0 :                 return nvme_ns_map_failure_rc(lba_count,
     768             :                                               ns->sectors_per_max_io,
     769             :                                               ns->sectors_per_stripe,
     770           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     771             :                                               rc);
     772             :         }
     773             : }
     774             : 
     775             : int
     776           2 : spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     777             :                                uint64_t lba, uint32_t lba_count,
     778             :                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     779             :                                spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     780             :                                spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
     781             :                                uint16_t apptag_mask, uint16_t apptag)
     782             : {
     783             :         struct nvme_request *req;
     784           2 :         struct nvme_payload payload;
     785           2 :         int rc = 0;
     786             : 
     787           2 :         if (!_is_io_flags_valid(io_flags)) {
     788           0 :                 return -EINVAL;
     789             :         }
     790             : 
     791           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     792           1 :                 return -EINVAL;
     793             :         }
     794             : 
     795           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
     796             : 
     797           1 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
     798             :                               io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
     799           1 :         if (req != NULL) {
     800           1 :                 return nvme_qpair_submit_request(qpair, req);
     801             :         } else {
     802           0 :                 return nvme_ns_map_failure_rc(lba_count,
     803             :                                               ns->sectors_per_max_io,
     804             :                                               ns->sectors_per_stripe,
     805           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     806             :                                               rc);
     807             :         }
     808             : }
     809             : 
     810             : static int
     811           8 : nvme_ns_cmd_rwv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
     812             :                     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     813             :                     spdk_nvme_req_next_sge_cb next_sge_fn, struct spdk_nvme_ns_cmd_ext_io_opts *opts,
     814             :                     enum spdk_nvme_nvm_opcode opc)
     815             : {
     816             :         struct nvme_request *req;
     817           8 :         struct nvme_payload payload;
     818             :         void *seq;
     819           8 :         int rc = 0;
     820             : 
     821           8 :         assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
     822             : 
     823           8 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     824           4 :                 return -EINVAL;
     825             :         }
     826             : 
     827           4 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
     828             : 
     829           4 :         if (opts) {
     830           4 :                 if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
     831           2 :                         return -EINVAL;
     832             :                 }
     833             : 
     834           2 :                 seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
     835           2 :                 if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
     836           0 :                         return -EINVAL;
     837             :                 }
     838             : 
     839           2 :                 payload.opts = opts;
     840           2 :                 payload.md = opts->metadata;
     841           2 :                 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
     842           2 :                                       opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);
     843             : 
     844             :         } else {
     845           0 :                 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, 0, 0, 0, 0,
     846             :                                       true, NULL, &rc);
     847             :         }
     848             : 
     849           2 :         if (req == NULL) {
     850           0 :                 return nvme_ns_map_failure_rc(lba_count,
     851             :                                               ns->sectors_per_max_io,
     852             :                                               ns->sectors_per_stripe,
     853           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     854             :                                               rc);
     855             :         }
     856             : 
     857           2 :         return nvme_qpair_submit_request(qpair, req);
     858             : }
     859             : 
     860             : int
     861           4 : spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     862             :                            uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
     863             :                            void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     864             :                            spdk_nvme_req_next_sge_cb next_sge_fn,
     865             :                            struct spdk_nvme_ns_cmd_ext_io_opts *opts)
     866             : {
     867           4 :         return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
     868             :                                    opts, SPDK_NVME_OPC_READ);
     869             : }
     870             : 
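A sketch of a call through this extended path, assuming the
spdk_nvme_ns_cmd_ext_io_opts layout from spdk/nvme.h; the size field tells the
library which members the caller's struct actually contains, which is what the
nvme_ns_cmd_get_ext_io_opt() lookup above relies on (read_complete and cursor
are illustrative names):

        struct spdk_nvme_ns_cmd_ext_io_opts opts = {
                .size = sizeof(opts),
                .io_flags = 0,
                .metadata = NULL,       /* optional separate metadata buffer */
        };
        int rc;

        rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count,
                                        read_complete, &cursor,
                                        my_reset_sgl, my_next_sge, &opts);

Passing opts == NULL is also legal, as the else branch shows: the command is
then issued with zeroed io_flags, apptag, and cdw13 and no accel sequence.
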
     871             : int
     872           3 : spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     873             :                        void *buffer, uint64_t lba,
     874             :                        uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     875             :                        uint32_t io_flags)
     876             : {
     877             :         struct nvme_request *req;
     878           3 :         struct nvme_payload payload;
     879           3 :         int rc = 0;
     880             : 
     881           3 :         if (!_is_io_flags_valid(io_flags)) {
     882           1 :                 return -EINVAL;
     883             :         }
     884             : 
     885           2 :         payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
     886             : 
     887           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
     888             :                               io_flags, 0, 0, 0, false, NULL, &rc);
     889           2 :         if (req != NULL) {
     890           2 :                 return nvme_qpair_submit_request(qpair, req);
     891             :         } else {
     892           0 :                 return nvme_ns_map_failure_rc(lba_count,
     893             :                                               ns->sectors_per_max_io,
     894             :                                               ns->sectors_per_stripe,
     895           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     896             :                                               rc);
     897             :         }
     898             : }
     899             : 
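A minimal usage sketch for the contiguous-buffer write, assuming an initialized
ns/qpair and the DMA-safe allocator from spdk/env.h; write_complete is an
illustrative name, and completions are only delivered while the caller polls
spdk_nvme_qpair_process_completions():

        static void
        write_complete(void *arg, const struct spdk_nvme_cpl *cpl)
        {
                if (spdk_nvme_cpl_is_error(cpl)) {
                        fprintf(stderr, "write failed: %s\n",
                                spdk_nvme_cpl_get_status_string(&cpl->status));
                }
        }

        void *buf = spdk_zmalloc((size_t)lba_count * spdk_nvme_ns_get_sector_size(ns),
                                 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
        int rc = spdk_nvme_ns_cmd_write(ns, qpair, buf, lba, lba_count,
                                        write_complete, NULL, 0);
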
     900             : static int
     901           6 : nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
     902             : {
     903             :         uint32_t sector_size;
     904             : 
     905             :         /* Not all NVMe Zoned Namespaces support the zone append command. */
     906           6 :         if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
     907           0 :                 return -EINVAL;
     908             :         }
     909             : 
      910           6 :         sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
     911             : 
      912             :         /* Fail an oversized zone append command early. */
     913           6 :         if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
     914           3 :                 return -EINVAL;
     915             :         }
     916             : 
     917           3 :         return 0;
     918             : }
     919             : 
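The zasl bound enforced above can also be computed up front by the application.
A sketch, assuming the public getter spdk_nvme_zns_ctrlr_get_max_zone_append_size()
from spdk/nvme_zns.h (name from memory) and a format without extended-LBA
metadata; with extended LBAs each block carries its metadata in the host
buffer, which is why the code above uses _nvme_get_host_buffer_sector_size()
rather than the raw sector size:

        /* Hypothetical helper: largest zone append, in logical blocks. */
        static uint32_t
        max_zone_append_lbas(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
        {
                uint32_t zasl_bytes = spdk_nvme_zns_ctrlr_get_max_zone_append_size(ctrlr);

                return zasl_bytes / spdk_nvme_ns_get_sector_size(ns);
        }
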
     920             : int
     921           4 : nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     922             :                                 void *buffer, void *metadata, uint64_t zslba,
     923             :                                 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
     924             :                                 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
     925             : {
     926             :         struct nvme_request *req;
     927           4 :         struct nvme_payload payload;
     928           4 :         int rc = 0;
     929             : 
     930           4 :         if (!_is_io_flags_valid(io_flags)) {
     931           0 :                 return -EINVAL;
     932             :         }
     933             : 
     934           4 :         rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
     935           4 :         if (rc) {
     936           2 :                 return rc;
     937             :         }
     938             : 
     939           2 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
     940             : 
     941           2 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
     942             :                               SPDK_NVME_OPC_ZONE_APPEND,
     943             :                               io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
     944           2 :         if (req != NULL) {
     945             :                 /*
     946             :                  * Zone append commands cannot be split (num_children has to be 0).
      947             :                  * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split,
      948             :                  * because an oversized request would already have been rejected by
      949             :                  * nvme_ns_cmd_check_zone_append(), given that zasl <= mdts.
     950             :                  */
     951           2 :                 assert(req->num_children == 0);
     952           2 :                 if (req->num_children) {
     953           0 :                         nvme_request_free_children(req);
     954           0 :                         nvme_free_request(req);
     955           0 :                         return -EINVAL;
     956             :                 }
     957           2 :                 return nvme_qpair_submit_request(qpair, req);
     958             :         } else {
     959           0 :                 return nvme_ns_map_failure_rc(lba_count,
     960             :                                               ns->sectors_per_max_io,
     961             :                                               ns->sectors_per_stripe,
     962           0 :                                               qpair->ctrlr->opts.io_queue_requests,
     963             :                                               rc);
     964             :         }
     965             : }
     966             : 
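nvme_ns_cmd_zone_append_with_md() is internal; applications reach it through
the ZNS wrappers declared in spdk/nvme_zns.h, spdk_nvme_zns_zone_append() and
spdk_nvme_zns_zone_append_with_md() (names as I recall them). A sketch of the
plain variant:

        int rc = spdk_nvme_zns_zone_append(ns, qpair, buf, zslba, lba_count,
                                           append_complete, NULL, 0);

Per the ZNS specification, the LBA actually assigned to the data is returned
in DW0/DW1 of the completion, which is what lets multiple appends to the same
zone stay in flight concurrently where plain writes would have to serialize.
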
     967             : int
     968           2 : nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
     969             :                                  uint64_t zslba, uint32_t lba_count,
     970             :                                  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
     971             :                                  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
     972             :                                  spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
     973             :                                  uint16_t apptag_mask, uint16_t apptag)
     974             : {
     975             :         struct nvme_request *req;
     976           2 :         struct nvme_payload payload;
     977           2 :         int rc = 0;
     978             : 
     979           2 :         if (!_is_io_flags_valid(io_flags)) {
     980           0 :                 return -EINVAL;
     981             :         }
     982             : 
     983           2 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
     984           0 :                 return -EINVAL;
     985             :         }
     986             : 
     987           2 :         rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
     988           2 :         if (rc) {
     989           1 :                 return rc;
     990             :         }
     991             : 
     992           1 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
     993             : 
     994           1 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
     995             :                               SPDK_NVME_OPC_ZONE_APPEND,
     996             :                               io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
     997           1 :         if (req != NULL) {
      998             :                 /*
      999             :                  * Zone append commands cannot be split (num_children has to be 0).
     1000             :                  * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
     1001             :                  * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
     1002             :                  * do not always split a request. They verify the payload size, verify that
     1003             :                  * the number of SGEs stays below max_sge, and verify the SGE alignment rules
     1004             :                  * (in the case of PRPs); if any of these checks fail, they split the request.
     1005             :                  * In our case a split is very unlikely, since we already verified the size
     1006             :                  * with nvme_ns_cmd_check_zone_append(); however, we still need to call these
     1007             :                  * functions to perform the verification itself. If they do cause a split,
     1008             :                  * we return an error here; for well-formed requests they never will.
     1009             :                  */
    1010           1 :                 if (req->num_children) {
    1011           0 :                         nvme_request_free_children(req);
    1012           0 :                         nvme_free_request(req);
    1013           0 :                         return -EINVAL;
    1014             :                 }
    1015           1 :                 return nvme_qpair_submit_request(qpair, req);
    1016             :         } else {
    1017           0 :                 return nvme_ns_map_failure_rc(lba_count,
    1018             :                                               ns->sectors_per_max_io,
    1019             :                                               ns->sectors_per_stripe,
    1020           0 :                                               qpair->ctrlr->opts.io_queue_requests,
    1021             :                                               rc);
    1022             :         }
    1023             : }
    1024             : 
    1025             : int
    1026           7 : spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1027             :                                void *buffer, void *metadata, uint64_t lba,
    1028             :                                uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1029             :                                uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
    1030             : {
    1031             :         struct nvme_request *req;
    1032           7 :         struct nvme_payload payload;
    1033           7 :         int rc = 0;
    1034             : 
    1035           7 :         if (!_is_io_flags_valid(io_flags)) {
    1036           0 :                 return -EINVAL;
    1037             :         }
    1038             : 
    1039           7 :         payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
    1040             : 
    1041           7 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
    1042             :                               io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
    1043           7 :         if (req != NULL) {
    1044           7 :                 return nvme_qpair_submit_request(qpair, req);
    1045             :         } else {
    1046           0 :                 return nvme_ns_map_failure_rc(lba_count,
    1047             :                                               ns->sectors_per_max_io,
    1048             :                                               ns->sectors_per_stripe,
    1049           0 :                                               qpair->ctrlr->opts.io_queue_requests,
    1050             :                                               rc);
    1051             :         }
    1052             : }
    1053             : 
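A sketch of a write with a separate metadata buffer and end-to-end guard-tag
checking, assuming a namespace formatted with separate (not extended) metadata;
the SPDK_NVME_IO_FLAGS_PRCHK_GUARD flag name is quoted from memory:

        void *md = spdk_zmalloc((size_t)lba_count * spdk_nvme_ns_get_md_size(ns),
                                0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
        int rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, buf, md, lba, lba_count,
                                                write_complete, NULL,
                                                SPDK_NVME_IO_FLAGS_PRCHK_GUARD,
                                                0 /* apptag_mask */, 0 /* apptag */);
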
    1054             : int
    1055           0 : spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1056             :                            void *buffer, uint64_t lba,
    1057             :                            uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1058             :                            struct spdk_nvme_ns_cmd_ext_io_opts *opts)
    1059             : {
    1060           0 :         return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
    1061             :                                   SPDK_NVME_OPC_WRITE);
    1062             : }
    1063             : 
    1064             : int
    1065           4 : spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1066             :                         uint64_t lba, uint32_t lba_count,
    1067             :                         spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
    1068             :                         spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
    1069             :                         spdk_nvme_req_next_sge_cb next_sge_fn)
    1070             : {
    1071             :         struct nvme_request *req;
    1072           4 :         struct nvme_payload payload;
    1073           4 :         int rc = 0;
    1074             : 
    1075           4 :         if (!_is_io_flags_valid(io_flags)) {
    1076           0 :                 return -EINVAL;
    1077             :         }
    1078             : 
    1079           4 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
    1080           1 :                 return -EINVAL;
    1081             :         }
    1082             : 
    1083           3 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
    1084             : 
    1085           3 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
    1086             :                               io_flags, 0, 0, 0, true, NULL, &rc);
    1087           3 :         if (req != NULL) {
    1088           2 :                 return nvme_qpair_submit_request(qpair, req);
    1089             :         } else {
    1090           1 :                 return nvme_ns_map_failure_rc(lba_count,
    1091             :                                               ns->sectors_per_max_io,
    1092             :                                               ns->sectors_per_stripe,
    1093           1 :                                               qpair->ctrlr->opts.io_queue_requests,
    1094             :                                               rc);
    1095             :         }
    1096             : }
    1097             : 
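The -ENOMEM/-EINVAL distinction made by nvme_ns_map_failure_rc() matters to
callers: a request that would fan out into at least as many child requests as
the qpair can ever hold is reported as -EINVAL, because resubmitting it can
never succeed, while a plain -ENOMEM is transient. A caller-side sketch
(requeue_io and fail_io are hypothetical helpers):

        int rc = spdk_nvme_ns_cmd_writev(ns, qpair, lba, lba_count,
                                         write_complete, &cursor, 0,
                                         my_reset_sgl, my_next_sge);
        if (rc == -ENOMEM) {
                /* Transient: retry after reaping completions with
                 * spdk_nvme_qpair_process_completions(). */
                requeue_io(io);
        } else if (rc != 0) {
                /* Permanent for this request, e.g. -EINVAL. */
                fail_io(io, rc);
        }
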
    1098             : int
    1099           0 : spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1100             :                                 uint64_t lba, uint32_t lba_count,
    1101             :                                 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
    1102             :                                 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
    1103             :                                 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
    1104             :                                 uint16_t apptag_mask, uint16_t apptag)
    1105             : {
    1106             :         struct nvme_request *req;
    1107           0 :         struct nvme_payload payload;
    1108           0 :         int rc = 0;
    1109             : 
    1110           0 :         if (!_is_io_flags_valid(io_flags)) {
    1111           0 :                 return -EINVAL;
    1112             :         }
    1113             : 
    1114           0 :         if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
    1115           0 :                 return -EINVAL;
    1116             :         }
    1117             : 
    1118           0 :         payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
    1119             : 
    1120           0 :         req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
    1121             :                               io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
    1122           0 :         if (req != NULL) {
    1123           0 :                 return nvme_qpair_submit_request(qpair, req);
    1124             :         } else {
    1125           0 :                 return nvme_ns_map_failure_rc(lba_count,
    1126             :                                               ns->sectors_per_max_io,
    1127             :                                               ns->sectors_per_stripe,
    1128           0 :                                               qpair->ctrlr->opts.io_queue_requests,
    1129             :                                               rc);
    1130             :         }
    1131             : }
    1132             : 
    1133             : int
    1134           4 : spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
    1135             :                             uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1136             :                             spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
    1137             :                             spdk_nvme_req_next_sge_cb next_sge_fn,
    1138             :                             struct spdk_nvme_ns_cmd_ext_io_opts *opts)
    1139             : {
    1140           4 :         return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
    1141             :                                    opts, SPDK_NVME_OPC_WRITE);
    1142             : }
    1143             : 
    1144             : int
    1145           1 : spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1146             :                               uint64_t lba, uint32_t lba_count,
    1147             :                               spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1148             :                               uint32_t io_flags)
    1149             : {
    1150             :         struct nvme_request     *req;
    1151             :         struct spdk_nvme_cmd    *cmd;
    1152             :         uint64_t                *tmp_lba;
    1153             : 
    1154           1 :         if (!_is_io_flags_valid(io_flags)) {
    1155           0 :                 return -EINVAL;
    1156             :         }
    1157             : 
    1158           1 :         if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
    1159           0 :                 return -EINVAL;
    1160             :         }
    1161             : 
    1162           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1163           1 :         if (req == NULL) {
    1164           0 :                 return -ENOMEM;
    1165             :         }
    1166             : 
    1167           1 :         cmd = &req->cmd;
    1168           1 :         cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
    1169           1 :         cmd->nsid = ns->id;
    1170             : 
    1171           1 :         tmp_lba = (uint64_t *)&cmd->cdw10;
    1172           1 :         *tmp_lba = lba;
    1173           1 :         cmd->cdw12 = lba_count - 1;
    1174           1 :         cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
    1175           1 :         cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
    1176             : 
    1177           1 :         return nvme_qpair_submit_request(qpair, req);
    1178             : }
    1179             : 
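Write Zeroes carries no data payload (hence the null request above), so one
command can clear up to 65536 blocks, the most the 0-based 16-bit NLB in cdw12
can express; spdk_nvme_ns_cmd_verify() below follows exactly the same shape.
A one-line sketch:

        int rc = spdk_nvme_ns_cmd_write_zeroes(ns, qpair, lba, 65536,
                                               zeroes_done, NULL, 0);
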
    1180             : int
    1181           1 : spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1182             :                         uint64_t lba, uint32_t lba_count,
    1183             :                         spdk_nvme_cmd_cb cb_fn, void *cb_arg,
    1184             :                         uint32_t io_flags)
    1185             : {
    1186             :         struct nvme_request     *req;
    1187             :         struct spdk_nvme_cmd    *cmd;
    1188             : 
    1189           1 :         if (!_is_io_flags_valid(io_flags)) {
    1190           0 :                 return -EINVAL;
    1191             :         }
    1192             : 
    1193           1 :         if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
    1194           0 :                 return -EINVAL;
    1195             :         }
    1196             : 
    1197           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1198           1 :         if (req == NULL) {
    1199           0 :                 return -ENOMEM;
    1200             :         }
    1201             : 
    1202           1 :         cmd = &req->cmd;
    1203           1 :         cmd->opc = SPDK_NVME_OPC_VERIFY;
    1204           1 :         cmd->nsid = ns->id;
    1205             : 
    1206           1 :         *(uint64_t *)&cmd->cdw10 = lba;
    1207           1 :         cmd->cdw12 = lba_count - 1;
    1208           1 :         cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
    1209           1 :         cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
    1210             : 
    1211           1 :         return nvme_qpair_submit_request(qpair, req);
    1212             : }
    1213             : 
    1214             : int
    1215           1 : spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1216             :                                      uint64_t lba, uint32_t lba_count,
    1217             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1218             : {
    1219             :         struct nvme_request     *req;
    1220             :         struct spdk_nvme_cmd    *cmd;
    1221             :         uint64_t                *tmp_lba;
    1222             : 
    1223           1 :         if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
    1224           0 :                 return -EINVAL;
    1225             :         }
    1226             : 
    1227           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1228           1 :         if (req == NULL) {
    1229           0 :                 return -ENOMEM;
    1230             :         }
    1231             : 
    1232           1 :         cmd = &req->cmd;
    1233           1 :         cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
    1234           1 :         cmd->nsid = ns->id;
    1235             : 
    1236           1 :         tmp_lba = (uint64_t *)&cmd->cdw10;
    1237           1 :         *tmp_lba = lba;
    1238           1 :         cmd->cdw12 = lba_count - 1;
    1239             : 
    1240           1 :         return nvme_qpair_submit_request(qpair, req);
    1241             : }
    1242             : 
    1243             : int
    1244           3 : spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1245             :                                     uint32_t type,
    1246             :                                     const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
    1247             :                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1248             : {
    1249             :         struct nvme_request     *req;
    1250             :         struct spdk_nvme_cmd    *cmd;
    1251             : 
    1252           3 :         if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
    1253           1 :                 return -EINVAL;
    1254             :         }
    1255             : 
    1256           2 :         if (ranges == NULL) {
    1257           0 :                 return -EINVAL;
    1258             :         }
    1259             : 
    1260           2 :         req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
    1261             :                                               num_ranges * sizeof(struct spdk_nvme_dsm_range),
    1262             :                                               cb_fn, cb_arg, true);
    1263           2 :         if (req == NULL) {
    1264           0 :                 return -ENOMEM;
    1265             :         }
    1266             : 
    1267           2 :         cmd = &req->cmd;
    1268           2 :         cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
    1269           2 :         cmd->nsid = ns->id;
    1270             : 
    1271           2 :         cmd->cdw10_bits.dsm.nr = num_ranges - 1;
    1272           2 :         cmd->cdw11 = type;
    1273             : 
    1274           2 :         return nvme_qpair_submit_request(qpair, req);
    1275             : }
    1276             : 
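The range list is copied before submission (the final 'true' argument marks a
host-to-controller transfer), so a short-lived stack array is fine. A
deallocate (TRIM) sketch; the spdk_nvme_dsm_range field names are as I recall
them from spdk/nvme_spec.h:

        struct spdk_nvme_dsm_range ranges[2];

        memset(ranges, 0, sizeof(ranges));      /* requires <string.h> */
        ranges[0].starting_lba = 0;
        ranges[0].length = 256;                 /* in logical blocks */
        ranges[1].starting_lba = 4096;
        ranges[1].length = 256;

        int rc = spdk_nvme_ns_cmd_dataset_management(ns, qpair,
                                                     SPDK_NVME_DSM_ATTR_DEALLOCATE,
                                                     ranges, 2, dsm_done, NULL);
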
    1277             : int
    1278           3 : spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1279             :                       const struct spdk_nvme_scc_source_range *ranges,
    1280             :                       uint16_t num_ranges, uint64_t dest_lba,
    1281             :                       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1282             : {
    1283             :         struct nvme_request     *req;
    1284             :         struct spdk_nvme_cmd    *cmd;
    1285             : 
    1286           3 :         if (num_ranges == 0) {
    1287           1 :                 return -EINVAL;
    1288             :         }
    1289             : 
    1290           2 :         if (ranges == NULL) {
    1291           0 :                 return -EINVAL;
    1292             :         }
    1293             : 
    1294           2 :         req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
    1295             :                                               num_ranges * sizeof(struct spdk_nvme_scc_source_range),
    1296             :                                               cb_fn, cb_arg, true);
    1297           2 :         if (req == NULL) {
    1298           0 :                 return -ENOMEM;
    1299             :         }
    1300             : 
    1301           2 :         cmd = &req->cmd;
    1302           2 :         cmd->opc = SPDK_NVME_OPC_COPY;
    1303           2 :         cmd->nsid = ns->id;
    1304             : 
    1305           2 :         *(uint64_t *)&cmd->cdw10 = dest_lba;
    1306           2 :         cmd->cdw12 = num_ranges - 1;
    1307             : 
    1308           2 :         return nvme_qpair_submit_request(qpair, req);
    1309             : }
    1310             : 
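A Simple Copy sketch; as with DSM, the source-range array is copied at
submission time, and nlb is 0-based per the Simple Copy command definition
(field names as I recall them from spdk/nvme_spec.h):

        struct spdk_nvme_scc_source_range range = {
                .slba = src_lba,
                .nlb = nblocks - 1,     /* 0-based number of logical blocks */
        };

        int rc = spdk_nvme_ns_cmd_copy(ns, qpair, &range, 1, dest_lba,
                                       copy_done, NULL);
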
    1311             : int
    1312           1 : spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1313             :                        spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1314             : {
    1315             :         struct nvme_request     *req;
    1316             :         struct spdk_nvme_cmd    *cmd;
    1317             : 
    1318           1 :         req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    1319           1 :         if (req == NULL) {
    1320           0 :                 return -ENOMEM;
    1321             :         }
    1322             : 
    1323           1 :         cmd = &req->cmd;
    1324           1 :         cmd->opc = SPDK_NVME_OPC_FLUSH;
    1325           1 :         cmd->nsid = ns->id;
    1326             : 
    1327           1 :         return nvme_qpair_submit_request(qpair, req);
    1328             : }
    1329             : 
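Flush takes nothing beyond the namespace and the completion hook; it commits
any completed writes in the controller's volatile write cache to media. A
one-line sketch:

        int rc = spdk_nvme_ns_cmd_flush(ns, qpair, flush_done, NULL);
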
    1330             : int
    1331           1 : spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
    1332             :                                       struct spdk_nvme_qpair *qpair,
    1333             :                                       struct spdk_nvme_reservation_register_data *payload,
    1334             :                                       bool ignore_key,
    1335             :                                       enum spdk_nvme_reservation_register_action action,
    1336             :                                       enum spdk_nvme_reservation_register_cptpl cptpl,
    1337             :                                       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1338             : {
    1339             :         struct nvme_request     *req;
    1340             :         struct spdk_nvme_cmd    *cmd;
    1341             : 
    1342           1 :         req = nvme_allocate_request_user_copy(qpair,
    1343             :                                               payload, sizeof(struct spdk_nvme_reservation_register_data),
    1344             :                                               cb_fn, cb_arg, true);
    1345           1 :         if (req == NULL) {
    1346           0 :                 return -ENOMEM;
    1347             :         }
    1348             : 
    1349           1 :         cmd = &req->cmd;
    1350           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
    1351           1 :         cmd->nsid = ns->id;
    1352             : 
    1353           1 :         cmd->cdw10_bits.resv_register.rrega = action;
    1354           1 :         cmd->cdw10_bits.resv_register.iekey = ignore_key;
    1355           1 :         cmd->cdw10_bits.resv_register.cptpl = cptpl;
    1356             : 
    1357           1 :         return nvme_qpair_submit_request(qpair, req);
    1358             : }
    1359             : 
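A registration sketch; the action/cptpl enumerator names are quoted from
memory, and per the spec the current key (crkey) is ignored for the REGISTER
action. The release and acquire variants below follow the same pattern with
their own action and type enums:

        struct spdk_nvme_reservation_register_data rr_data = {
                .crkey = 0,             /* current key, unused for REGISTER */
                .nrkey = 0xA1B2C3D4,    /* new reservation key */
        };

        int rc = spdk_nvme_ns_cmd_reservation_register(ns, qpair, &rr_data,
                                                       false /* ignore_key */,
                                                       SPDK_NVME_RESERVE_REGISTER_KEY,
                                                       SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
                                                       resv_done, NULL);
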
    1360             : int
    1361           1 : spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
    1362             :                                      struct spdk_nvme_qpair *qpair,
    1363             :                                      struct spdk_nvme_reservation_key_data *payload,
    1364             :                                      bool ignore_key,
    1365             :                                      enum spdk_nvme_reservation_release_action action,
    1366             :                                      enum spdk_nvme_reservation_type type,
    1367             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1368             : {
    1369             :         struct nvme_request     *req;
    1370             :         struct spdk_nvme_cmd    *cmd;
    1371             : 
    1372           1 :         req = nvme_allocate_request_user_copy(qpair,
    1373             :                                               payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
    1374             :                                               cb_arg, true);
    1375           1 :         if (req == NULL) {
    1376           0 :                 return -ENOMEM;
    1377             :         }
    1378             : 
    1379           1 :         cmd = &req->cmd;
    1380           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
    1381           1 :         cmd->nsid = ns->id;
    1382             : 
    1383           1 :         cmd->cdw10_bits.resv_release.rrela = action;
    1384           1 :         cmd->cdw10_bits.resv_release.iekey = ignore_key;
    1385           1 :         cmd->cdw10_bits.resv_release.rtype = type;
    1386             : 
    1387           1 :         return nvme_qpair_submit_request(qpair, req);
    1388             : }
    1389             : 
    1390             : int
    1391           1 : spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
    1392             :                                      struct spdk_nvme_qpair *qpair,
    1393             :                                      struct spdk_nvme_reservation_acquire_data *payload,
    1394             :                                      bool ignore_key,
    1395             :                                      enum spdk_nvme_reservation_acquire_action action,
    1396             :                                      enum spdk_nvme_reservation_type type,
    1397             :                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1398             : {
    1399             :         struct nvme_request     *req;
    1400             :         struct spdk_nvme_cmd    *cmd;
    1401             : 
    1402           1 :         req = nvme_allocate_request_user_copy(qpair,
    1403             :                                               payload, sizeof(struct spdk_nvme_reservation_acquire_data),
    1404             :                                               cb_fn, cb_arg, true);
    1405           1 :         if (req == NULL) {
    1406           0 :                 return -ENOMEM;
    1407             :         }
    1408             : 
    1409           1 :         cmd = &req->cmd;
    1410           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
    1411           1 :         cmd->nsid = ns->id;
    1412             : 
    1413           1 :         cmd->cdw10_bits.resv_acquire.racqa = action;
    1414           1 :         cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
    1415           1 :         cmd->cdw10_bits.resv_acquire.rtype = type;
    1416             : 
    1417           1 :         return nvme_qpair_submit_request(qpair, req);
    1418             : }
    1419             : 
    1420             : int
    1421           1 : spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
    1422             :                                     struct spdk_nvme_qpair *qpair,
    1423             :                                     void *payload, uint32_t len,
    1424             :                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1425             : {
    1426             :         uint32_t                num_dwords;
    1427             :         struct nvme_request     *req;
    1428             :         struct spdk_nvme_cmd    *cmd;
    1429             : 
    1430           1 :         if (len & 0x3) {
    1431           0 :                 return -EINVAL;
    1432             :         }
    1433             : 
    1434           1 :         req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    1435           1 :         if (req == NULL) {
    1436           0 :                 return -ENOMEM;
    1437             :         }
    1438             : 
    1439           1 :         cmd = &req->cmd;
    1440           1 :         cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
    1441           1 :         cmd->nsid = ns->id;
    1442             : 
    1443           1 :         num_dwords = (len >> 2);
    1444           1 :         cmd->cdw10 = num_dwords - 1; /* 0-based */
    1445             : 
    1446           1 :         return nvme_qpair_submit_request(qpair, req);
    1447             : }
    1448             : 
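The report length must be a whole number of dwords (the len & 0x3 check), and
the final 'false' argument marks a controller-to-host transfer, so the data is
copied back into the caller's buffer on completion and an ordinary heap
allocation suffices. A sketch, assuming the payload begins with a struct
spdk_nvme_reservation_status_data header (struct name from memory):

        uint32_t len = 4096;
        void *buf = calloc(1, len);     /* requires <stdlib.h> */

        int rc = spdk_nvme_ns_cmd_reservation_report(ns, qpair, buf, len,
                                                     report_done, NULL);
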
    1449             : int
    1450           2 : spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1451             :                               void *payload, uint32_t len, uint8_t mo, uint16_t mos,
    1452             :                               spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1453             : {
    1454             :         uint32_t                num_dwords;
    1455             :         struct nvme_request     *req;
    1456             :         struct spdk_nvme_cmd    *cmd;
    1457             : 
    1458           2 :         if (len & 0x3) {
    1459           1 :                 return -EINVAL;
    1460             :         }
    1461             : 
    1462           1 :         req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    1463           1 :         if (req == NULL) {
    1464           0 :                 return -ENOMEM;
    1465             :         }
    1466             : 
    1467           1 :         cmd = &req->cmd;
    1468           1 :         cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
    1469           1 :         cmd->nsid = ns->id;
    1470             : 
    1471           1 :         cmd->cdw10_bits.mgmt_send_recv.mo = mo;
    1472           1 :         cmd->cdw10_bits.mgmt_send_recv.mos = mos;
    1473             : 
    1474           1 :         num_dwords = (len >> 2);
    1475           1 :         cmd->cdw11 = num_dwords - 1; /* 0-based */
    1476             : 
    1477           1 :         return nvme_qpair_submit_request(qpair, req);
    1478             : }
    1479             : 
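I/O Management Receive sizes its transfer as a 0-based dword count in cdw11,
hence the same dword-alignment check on len. With FDP enabled, management
operation 01h is Reclaim Unit Handle Status per my reading of the NVMe 2.0
spec; the raw value is used below rather than guessing at an SPDK enumerator
name:

        uint32_t len = 4096;            /* must be a multiple of 4 */
        void *buf = calloc(1, len);

        int rc = spdk_nvme_ns_cmd_io_mgmt_recv(ns, qpair, buf, len,
                                               0x01 /* RUHS */, 0 /* mos */,
                                               mgmt_done, NULL);
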
    1480             : int
    1481           1 : spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
    1482             :                               void *payload, uint32_t len, uint8_t mo, uint16_t mos,
    1483             :                               spdk_nvme_cmd_cb cb_fn, void *cb_arg)
    1484             : {
    1485             :         struct nvme_request     *req;
    1486             :         struct spdk_nvme_cmd    *cmd;
    1487             : 
    1488           1 :         req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    1489           1 :         if (req == NULL) {
    1490           0 :                 return -ENOMEM;
    1491             :         }
    1492             : 
    1493           1 :         cmd = &req->cmd;
    1494           1 :         cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
    1495           1 :         cmd->nsid = ns->id;
    1496             : 
    1497           1 :         cmd->cdw10_bits.mgmt_send_recv.mo = mo;
    1498           1 :         cmd->cdw10_bits.mgmt_send_recv.mos = mos;
    1499             : 
    1500           1 :         return nvme_qpair_submit_request(qpair, req);
    1501             : }

Generated by: LCOV version 1.15