Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2017 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "nvmf_internal.h"
10 :
11 : #include "spdk/bdev.h"
12 : #include "spdk/endian.h"
13 : #include "spdk/thread.h"
14 : #include "spdk/likely.h"
15 : #include "spdk/nvme.h"
16 : #include "spdk/nvmf_cmd.h"
17 : #include "spdk/nvmf_spec.h"
18 : #include "spdk/trace.h"
19 : #include "spdk/scsi_spec.h"
20 : #include "spdk/string.h"
21 : #include "spdk/util.h"
22 :
23 : #include "spdk/log.h"
24 :
25 : static bool
26 0 : nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
27 : enum spdk_bdev_io_type io_type)
28 : {
29 : struct spdk_nvmf_ns *ns;
30 :
31 0 : for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
32 0 : ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
33 0 : if (ns->bdev == NULL) {
34 0 : continue;
35 : }
36 :
37 0 : if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
38 0 : SPDK_DEBUGLOG(nvmf,
39 : "Subsystem %s namespace %u (%s) does not support io_type %d\n",
40 : spdk_nvmf_subsystem_get_nqn(subsystem),
41 : ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
42 0 : return false;
43 : }
44 0 : }
45 :
46 0 : SPDK_DEBUGLOG(nvmf, "All devices in Subsystem %s support io_type %d\n",
47 : spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
48 0 : return true;
49 0 : }
50 :
51 : bool
52 0 : nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
53 : {
54 0 : return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
55 : }
56 :
57 : bool
58 0 : nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
59 : {
60 0 : return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
61 : }
62 :
63 : bool
64 0 : nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr)
65 : {
66 0 : return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_COPY);
67 : }
68 :
69 : static void
70 4 : nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
71 : void *cb_arg)
72 : {
73 4 : struct spdk_nvmf_request *req = cb_arg;
74 4 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
75 4 : int sc = 0, sct = 0;
76 4 : uint32_t cdw0 = 0;
77 :
78 4 : if (spdk_unlikely(req->first_fused)) {
79 0 : struct spdk_nvmf_request *first_req = req->first_fused_req;
80 0 : struct spdk_nvme_cpl *first_response = &first_req->rsp->nvme_cpl;
81 0 : int first_sc = 0, first_sct = 0;
82 :
83 : /* get status for both operations */
84 0 : spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &first_sct, &first_sc, &sct, &sc);
85 0 : first_response->cdw0 = cdw0;
86 0 : first_response->status.sc = first_sc;
87 0 : first_response->status.sct = first_sct;
88 :
89 : /* complete the first request of the fused pair */
90 0 : spdk_nvmf_request_complete(first_req);
91 0 : req->first_fused_req = NULL;
92 0 : req->first_fused = false;
93 0 : } else {
94 4 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
95 : }
96 :
97 4 : response->cdw0 = cdw0;
98 4 : response->status.sc = sc;
99 4 : response->status.sct = sct;
100 :
101 4 : spdk_nvmf_request_complete(req);
102 4 : spdk_bdev_free_io(bdev_io);
103 4 : }
104 :
105 : static void
106 2 : nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
107 : void *cb_arg)
108 : {
109 2 : struct spdk_nvmf_request *req = cb_arg;
110 :
111 2 : if (req->cmd_cb_fn) {
112 0 : req->cmd_cb_fn(req);
113 0 : }
114 :
115 2 : nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
116 2 : }
117 :
118 : void
119 2 : nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
120 : bool dif_insert_or_strip)
121 : {
122 2 : struct spdk_bdev *bdev = ns->bdev;
123 2 : struct spdk_bdev_desc *desc = ns->desc;
124 : uint64_t num_blocks;
125 : uint32_t phys_blocklen;
126 : uint32_t max_copy;
127 :
128 2 : num_blocks = spdk_bdev_get_num_blocks(bdev);
129 :
130 2 : nsdata->nsze = num_blocks;
131 2 : nsdata->ncap = num_blocks;
132 2 : nsdata->nuse = num_blocks;
133 2 : nsdata->nlbaf = 0;
134 2 : nsdata->flbas.format = 0;
135 2 : nsdata->flbas.msb_format = 0;
136 2 : nsdata->nacwu = spdk_bdev_get_acwu(bdev) - 1; /* nacwu is 0-based */
137 2 : if (!dif_insert_or_strip) {
138 1 : nsdata->lbaf[0].ms = spdk_bdev_desc_get_md_size(desc);
139 1 : nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_desc_get_block_size(desc));
140 1 : if (nsdata->lbaf[0].ms != 0) {
141 1 : nsdata->flbas.extended = 1;
142 1 : nsdata->mc.extended = 1;
143 1 : nsdata->mc.pointer = 0;
144 1 : nsdata->dps.md_start = spdk_bdev_desc_is_dif_head_of_md(desc);
145 : /* The NVMf library doesn't process the PRACT and PRCHK flags, so
146 : * the use of the extended LBA buffer is left to users.
147 : */
148 1 : nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
149 1 : }
150 1 : } else {
151 1 : nsdata->lbaf[0].ms = 0;
152 1 : nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
153 : }
154 :
155 2 : phys_blocklen = spdk_bdev_get_physical_block_size(bdev);
156 2 : assert(phys_blocklen > 0);
157 : /* Linux driver uses min(nawupf, npwg) to set physical_block_size */
158 2 : nsdata->nsfeat.optperf = 1;
159 2 : nsdata->nsfeat.ns_atomic_write_unit = 1;
160 2 : nsdata->npwg = (phys_blocklen >> nsdata->lbaf[0].lbads) - 1;
161 2 : nsdata->nawupf = nsdata->npwg;
162 2 : nsdata->npwa = nsdata->npwg;
163 2 : nsdata->npdg = nsdata->npwg;
164 2 : nsdata->npda = nsdata->npwg;
165 :
166 2 : if (spdk_bdev_get_write_unit_size(bdev) == 1) {
167 2 : nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
168 2 : }
169 2 : nsdata->nmic.can_share = 1;
170 2 : if (nvmf_ns_is_ptpl_capable(ns)) {
171 2 : nsdata->nsrescap.rescap.persist = 1;
172 2 : }
173 2 : nsdata->nsrescap.rescap.write_exclusive = 1;
174 2 : nsdata->nsrescap.rescap.exclusive_access = 1;
175 2 : nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
176 2 : nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
177 2 : nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
178 2 : nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
179 2 : nsdata->nsrescap.rescap.ignore_existing_key = 1;
180 :
181 : SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
182 2 : memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));
183 :
184 : SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
185 2 : memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
186 :
187 : /* For now we support just one source range for the copy command */
188 2 : nsdata->msrc = 0;
189 :
190 2 : max_copy = spdk_bdev_get_max_copy(bdev);
191 2 : if (max_copy == 0 || max_copy > UINT16_MAX) {
192 : /* Zero means copy size is unlimited */
193 2 : nsdata->mcl = UINT16_MAX;
194 2 : nsdata->mssrl = UINT16_MAX;
195 2 : } else {
196 0 : nsdata->mcl = max_copy;
197 0 : nsdata->mssrl = max_copy;
198 : }
199 2 : }
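
The NPWG, NAWUPF, NPWA, NPDG and NPDA fields filled in above are 0-based counts of logical blocks derived from the bdev's physical block size. A minimal sketch of that arithmetic with assumed values (4096-byte physical blocks, 512-byte logical blocks), independent of the SPDK structures:

#include <assert.h>
#include <stdint.h>

static void
npwg_example(void)
{
	uint32_t phys_blocklen = 4096;	/* assumed spdk_bdev_get_physical_block_size() result */
	uint32_t lbads = 9;		/* log2(512), as stored in lbaf[0].lbads */
	uint32_t npwg;

	/* 0-based: a value of 7 advertises a preferred write granularity of 8 logical blocks. */
	npwg = (phys_blocklen >> lbads) - 1;
	assert(npwg == 7);
}
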
200 :
201 : void
202 0 : nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
203 : struct spdk_nvme_nvm_ns_data *nsdata_nvm)
204 : {
205 0 : struct spdk_bdev_desc *desc = ns->desc;
206 : uint8_t _16bpists;
207 : uint32_t sts, pif;
208 :
209 0 : if (spdk_bdev_desc_get_dif_type(desc) == SPDK_DIF_DISABLE) {
210 0 : return;
211 : }
212 :
213 0 : pif = spdk_bdev_desc_get_dif_pi_format(desc);
214 :
215 : /*
216 : * 16BPISTS shall be 1 for 32/64b Guard PI.
217 : * STCRS shall be 1 if 16BPISTS is 1.
218 : * 16 is the minimum value of STS for 32b Guard PI.
219 : */
220 0 : switch (pif) {
221 : case SPDK_DIF_PI_FORMAT_16:
222 0 : _16bpists = 0;
223 0 : sts = 0;
224 0 : break;
225 : case SPDK_DIF_PI_FORMAT_32:
226 0 : _16bpists = 1;
227 0 : sts = 16;
228 0 : break;
229 : case SPDK_DIF_PI_FORMAT_64:
230 0 : _16bpists = 1;
231 0 : sts = 0;
232 0 : break;
233 : default:
234 0 : SPDK_WARNLOG("PI format %u is not supported\n", pif);
235 0 : return;
236 : }
237 :
238 : /* For 16b Guard PI, Storage Tag is not available because we set STS to 0.
239 : * In this case, we do not have to set 16BPISTM to 1. For simplicity,
240 : * set 16BPISTM to 0 and set LBSTM to all zeroes.
241 : *
242 : * We will revisit this if we find an OS that uses the Storage Tag.
243 : */
244 0 : nsdata_nvm->lbstm = 0;
245 0 : nsdata_nvm->pic._16bpistm = 0;
246 :
247 0 : nsdata_nvm->pic._16bpists = _16bpists;
248 0 : nsdata_nvm->pic.stcrs = 0;
249 0 : nsdata_nvm->elbaf[0].sts = sts;
250 0 : nsdata_nvm->elbaf[0].pif = pif;
251 0 : }
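
The switch above reduces to a small mapping from DIF protection-information format to the (16BPISTS, STS) pair reported in the NVM command set namespace data. A table-driven sketch of the same mapping, assuming the SPDK_DIF_PI_FORMAT_* enumerators from spdk/dif.h start at 0:

#include <stdint.h>
#include "spdk/dif.h"

struct pif_fields {
	uint8_t _16bpists;
	uint32_t sts;
};

/* Same values as the switch above; 32b Guard PI requires STS >= 16. */
static const struct pif_fields pif_fields[] = {
	[SPDK_DIF_PI_FORMAT_16] = { ._16bpists = 0, .sts = 0 },
	[SPDK_DIF_PI_FORMAT_32] = { ._16bpists = 1, .sts = 16 },
	[SPDK_DIF_PI_FORMAT_64] = { ._16bpists = 1, .sts = 0 },
};
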
252 :
253 : static void
254 22 : nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
255 : uint64_t *num_blocks)
256 : {
257 : /* SLBA: CDW10 and CDW11 */
258 22 : *start_lba = from_le64(&cmd->cdw10);
259 :
260 : /* NLB: CDW12 bits 15:00, 0's based */
261 22 : *num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
262 22 : }
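
A minimal sketch of the same SLBA/NLB decoding against a hand-built command, using only spdk/nvme_spec.h and spdk/endian.h; it shows that an NLB field of 0 means one logical block, since the field is 0's based:

#include <assert.h>
#include "spdk/endian.h"
#include "spdk/nvme_spec.h"

static void
rw_params_example(void)
{
	struct spdk_nvme_cmd cmd = { 0 };
	uint64_t start_lba, num_blocks;

	to_le64(&cmd.cdw10, 0x1000);	/* SLBA spans CDW10 (low) and CDW11 (high) */
	to_le32(&cmd.cdw12, 7);		/* NLB field: 7 encodes 8 blocks */

	start_lba = from_le64(&cmd.cdw10);
	num_blocks = (from_le32(&cmd.cdw12) & 0xFFFFu) + 1;

	assert(start_lba == 0x1000);
	assert(num_blocks == 8);
}
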
263 :
264 : static void
265 3 : nvmf_bdev_ctrlr_get_rw_ext_params(const struct spdk_nvme_cmd *cmd,
266 : struct spdk_bdev_ext_io_opts *opts)
267 : {
268 : /* Get CDW12 values */
269 3 : opts->nvme_cdw12.raw = from_le32(&cmd->cdw12);
270 :
271 : /* Get CDW13 values */
272 3 : opts->nvme_cdw13.raw = from_le32(&cmd->cdw13);
273 :
274 3 : opts->dif_check_flags_exclude_mask = (~opts->nvme_cdw12.raw) & SPDK_NVME_IO_FLAGS_PRCHK_MASK;
275 3 : }
276 :
277 : static bool
278 27 : nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
279 : uint64_t io_num_blocks)
280 : {
281 27 : if (io_start_lba + io_num_blocks > bdev_num_blocks ||
282 19 : io_start_lba + io_num_blocks < io_start_lba) {
283 10 : return false;
284 : }
285 :
286 17 : return true;
287 27 : }
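
The second condition above rejects requests whose start LBA plus block count wraps around 64 bits. A sketch of the three cases, with assumed sizes, written as if it lived next to the static helper in this file:

#include <assert.h>
#include <stdint.h>

static void
lba_in_range_examples(void)
{
	/* Past the end of a 100-block bdev: 90 + 20 > 100. */
	assert(!nvmf_bdev_ctrlr_lba_in_range(100, 90, 20));

	/* 64-bit wrap-around: UINT64_MAX + 2 wraps to 1, which is below the start LBA. */
	assert(!nvmf_bdev_ctrlr_lba_in_range(100, UINT64_MAX, 2));

	/* Exactly the whole bdev is allowed: 0 + 100 == 100. */
	assert(nvmf_bdev_ctrlr_lba_in_range(100, 0, 100));
}
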
288 :
289 : static void
290 0 : nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
291 : {
292 0 : struct spdk_nvmf_request *req = arg;
293 : int rc;
294 :
295 0 : rc = nvmf_ctrlr_process_io_cmd(req);
296 0 : if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
297 0 : spdk_nvmf_request_complete(req);
298 0 : }
299 0 : }
300 :
301 : static void
302 0 : nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
303 : {
304 0 : struct spdk_nvmf_request *req = arg;
305 : int rc;
306 :
307 0 : rc = nvmf_ctrlr_process_admin_cmd(req);
308 0 : if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
309 0 : spdk_nvmf_request_complete(req);
310 0 : }
311 0 : }
312 :
313 : static void
314 2 : nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
315 : struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
316 : {
317 : int rc;
318 :
319 2 : req->bdev_io_wait.bdev = bdev;
320 2 : req->bdev_io_wait.cb_fn = cb_fn;
321 2 : req->bdev_io_wait.cb_arg = cb_arg;
322 :
323 2 : rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
324 2 : if (rc != 0) {
325 0 : assert(false);
326 : }
327 2 : req->qpair->group->stat.pending_bdev_io++;
328 2 : }
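
nvmf_bdev_ctrl_queue_io() is the ENOMEM back-pressure path used by every submission routine below: when a bdev call returns -ENOMEM, the request is parked on the bdev's io_wait queue and counted in the poll group's pending_bdev_io statistic, and the resubmit callbacks above re-run the full admin or I/O command processing once a bdev_io becomes available again.
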
329 :
330 : bool
331 0 : nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
332 : {
333 0 : return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
334 : }
335 :
336 : int
337 1 : nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
338 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
339 : {
340 4 : struct spdk_bdev_ext_io_opts opts = {
341 : .size = SPDK_SIZEOF(&opts, accel_sequence),
342 1 : .memory_domain = req->memory_domain,
343 1 : .memory_domain_ctx = req->memory_domain_ctx,
344 1 : .accel_sequence = req->accel_sequence,
345 : };
346 1 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
347 1 : uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
348 1 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
349 1 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
350 : uint64_t start_lba;
351 : uint64_t num_blocks;
352 : int rc;
353 :
354 1 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
355 1 : nvmf_bdev_ctrlr_get_rw_ext_params(cmd, &opts);
356 :
357 1 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
358 0 : SPDK_ERRLOG("end of media\n");
359 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
360 0 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
361 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
362 : }
363 :
364 1 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
365 0 : SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
366 : num_blocks, block_size, req->length);
367 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
368 0 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
369 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
370 : }
371 :
372 1 : assert(!spdk_nvmf_request_using_zcopy(req));
373 :
374 2 : rc = spdk_bdev_readv_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
375 1 : nvmf_bdev_ctrlr_complete_cmd, req, &opts);
376 1 : if (spdk_unlikely(rc)) {
377 0 : if (rc == -ENOMEM) {
378 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
379 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
380 : }
381 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
382 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
383 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
384 : }
385 :
386 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
387 1 : }
388 :
389 : int
390 1 : nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
391 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
392 : {
393 4 : struct spdk_bdev_ext_io_opts opts = {
394 : .size = SPDK_SIZEOF(&opts, nvme_cdw13),
395 1 : .memory_domain = req->memory_domain,
396 1 : .memory_domain_ctx = req->memory_domain_ctx,
397 1 : .accel_sequence = req->accel_sequence,
398 : };
399 1 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
400 1 : uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
401 1 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
402 1 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
403 : uint64_t start_lba;
404 : uint64_t num_blocks;
405 : int rc;
406 :
407 1 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
408 1 : nvmf_bdev_ctrlr_get_rw_ext_params(cmd, &opts);
409 :
410 1 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
411 0 : SPDK_ERRLOG("end of media\n");
412 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
413 0 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
414 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
415 : }
416 :
417 1 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
418 0 : SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
419 : num_blocks, block_size, req->length);
420 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
421 0 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
422 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
423 : }
424 :
425 1 : assert(!spdk_nvmf_request_using_zcopy(req));
426 :
427 2 : rc = spdk_bdev_writev_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
428 1 : nvmf_bdev_ctrlr_complete_cmd, req, &opts);
429 1 : if (spdk_unlikely(rc)) {
430 0 : if (rc == -ENOMEM) {
431 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
432 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
433 : }
434 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
435 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
436 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
437 : }
438 :
439 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
440 1 : }
441 :
442 : int
443 4 : nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
444 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
445 : {
446 4 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
447 4 : uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
448 4 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
449 4 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
450 : uint64_t start_lba;
451 : uint64_t num_blocks;
452 : int rc;
453 :
454 4 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
455 :
456 4 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
457 1 : SPDK_ERRLOG("end of media\n");
458 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
459 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
460 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
461 : }
462 :
463 3 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
464 1 : SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
465 : num_blocks, block_size, req->length);
466 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
467 1 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
468 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
469 : }
470 :
471 4 : rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
472 2 : nvmf_bdev_ctrlr_complete_cmd, req);
473 2 : if (spdk_unlikely(rc)) {
474 1 : if (rc == -ENOMEM) {
475 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
476 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
477 : }
478 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
479 1 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
480 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
481 : }
482 :
483 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
484 4 : }
485 :
486 : int
487 4 : nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
488 : struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req)
489 : {
490 4 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
491 4 : uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
492 4 : struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
493 4 : struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
494 4 : struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
495 : uint64_t write_start_lba, cmp_start_lba;
496 : uint64_t write_num_blocks, cmp_num_blocks;
497 : int rc;
498 :
499 4 : nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
500 4 : nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);
501 :
502 4 : if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
503 1 : SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
504 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
505 1 : rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
506 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
507 : }
508 :
509 3 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
510 : write_num_blocks))) {
511 1 : SPDK_ERRLOG("end of media\n");
512 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
513 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
514 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
515 : }
516 :
517 2 : if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
518 1 : SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
519 : write_num_blocks, block_size, write_req->length);
520 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
521 1 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
522 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
523 : }
524 :
525 2 : rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt, write_req->iov,
526 1 : write_req->iovcnt, write_start_lba, write_num_blocks, nvmf_bdev_ctrlr_complete_cmd, write_req);
527 1 : if (spdk_unlikely(rc)) {
528 0 : if (rc == -ENOMEM) {
529 0 : nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
530 0 : nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
531 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
532 : }
533 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
534 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
535 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
536 : }
537 :
538 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
539 4 : }
540 :
541 : int
542 4 : nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
543 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
544 : {
545 4 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
546 4 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
547 4 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
548 4 : uint64_t max_write_zeroes_size = req->qpair->ctrlr->subsys->max_write_zeroes_size_kib;
549 : uint64_t start_lba;
550 : uint64_t num_blocks;
551 : int rc;
552 :
553 4 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
554 4 : if (spdk_unlikely(max_write_zeroes_size > 0 &&
555 : num_blocks > (max_write_zeroes_size << 10) / spdk_bdev_desc_get_block_size(desc))) {
556 1 : SPDK_ERRLOG("invalid write zeroes size, should not exceed %" PRIu64 "Kib\n", max_write_zeroes_size);
557 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
558 1 : rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
559 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
560 : }
561 :
562 3 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
563 1 : SPDK_ERRLOG("end of media\n");
564 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
565 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
566 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
567 : }
568 :
569 2 : if (spdk_unlikely(cmd->cdw12_bits.write_zeroes.deac)) {
570 0 : SPDK_ERRLOG("Write Zeroes Deallocate is not supported\n");
571 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
572 0 : rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
573 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
574 : }
575 :
576 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
577 2 : nvmf_bdev_ctrlr_complete_cmd, req);
578 2 : if (spdk_unlikely(rc)) {
579 1 : if (rc == -ENOMEM) {
580 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
581 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
582 : }
583 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
584 1 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
585 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
586 : }
587 :
588 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
589 4 : }
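
The size check above converts the subsystem's max_write_zeroes_size_kib limit from KiB into logical blocks before comparing it with the request. A worked sketch with assumed values (a 1 MiB limit and 512-byte blocks):

#include <assert.h>
#include <stdint.h>

static void
write_zeroes_limit_example(void)
{
	uint64_t max_write_zeroes_size = 1024;	/* KiB, i.e. 1 MiB */
	uint32_t block_size = 512;		/* assumed spdk_bdev_desc_get_block_size() result */
	uint64_t num_blocks = 2049;

	/* (1024 << 10) / 512 == 2048 blocks, so a 2049-block request exceeds the limit. */
	assert(num_blocks > (max_write_zeroes_size << 10) / block_size);
}
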
590 :
591 : int
592 3 : nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
593 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
594 : {
595 3 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
596 : int rc;
597 :
598 : /* For an NVMe-oF controller, SPDK always sets the volatile write
599 : * cache bit to 1, so return success for block devices
600 : * that don't support the FLUSH command.
601 : */
602 3 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
603 1 : response->status.sct = SPDK_NVME_SCT_GENERIC;
604 1 : response->status.sc = SPDK_NVME_SC_SUCCESS;
605 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
606 : }
607 :
608 4 : rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
609 2 : nvmf_bdev_ctrlr_complete_cmd, req);
610 2 : if (spdk_unlikely(rc)) {
611 1 : if (rc == -ENOMEM) {
612 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
613 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
614 : }
615 1 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
616 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
617 : }
618 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
619 3 : }
620 :
621 : struct nvmf_bdev_ctrlr_unmap {
622 : struct spdk_nvmf_request *req;
623 : uint32_t count;
624 : struct spdk_bdev_desc *desc;
625 : struct spdk_bdev *bdev;
626 : struct spdk_io_channel *ch;
627 : uint32_t range_index;
628 : };
629 :
630 : static void
631 0 : nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
632 : void *cb_arg)
633 : {
634 0 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
635 0 : struct spdk_nvmf_request *req = unmap_ctx->req;
636 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
637 : int sc, sct;
638 : uint32_t cdw0;
639 :
640 0 : unmap_ctx->count--;
641 :
642 0 : if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
643 0 : response->status.sc == SPDK_NVME_SC_SUCCESS) {
644 0 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
645 0 : response->cdw0 = cdw0;
646 0 : response->status.sc = sc;
647 0 : response->status.sct = sct;
648 0 : }
649 :
650 0 : if (unmap_ctx->count == 0) {
651 0 : spdk_nvmf_request_complete(req);
652 0 : free(unmap_ctx);
653 0 : }
654 0 : spdk_bdev_free_io(bdev_io);
655 0 : }
656 :
657 : static int nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
658 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
659 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx);
660 : static void
661 0 : nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
662 : {
663 0 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
664 0 : struct spdk_nvmf_request *req = unmap_ctx->req;
665 0 : struct spdk_bdev_desc *desc = unmap_ctx->desc;
666 0 : struct spdk_bdev *bdev = unmap_ctx->bdev;
667 0 : struct spdk_io_channel *ch = unmap_ctx->ch;
668 :
669 0 : nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
670 0 : }
671 :
672 : static int
673 0 : nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
674 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
675 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
676 : {
677 : uint16_t nr, i;
678 0 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
679 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
680 0 : uint64_t max_discard_size = req->qpair->ctrlr->subsys->max_discard_size_kib;
681 0 : uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
682 : struct spdk_iov_xfer ix;
683 : uint64_t lba;
684 : uint32_t lba_count;
685 : int rc;
686 :
687 0 : nr = cmd->cdw10_bits.dsm.nr + 1;
688 0 : if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
689 0 : SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
690 0 : response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
691 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
692 : }
693 :
694 0 : if (unmap_ctx == NULL) {
695 0 : unmap_ctx = calloc(1, sizeof(*unmap_ctx));
696 0 : if (!unmap_ctx) {
697 0 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
698 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
699 : }
700 :
701 0 : unmap_ctx->req = req;
702 0 : unmap_ctx->desc = desc;
703 0 : unmap_ctx->ch = ch;
704 0 : unmap_ctx->bdev = bdev;
705 :
706 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
707 0 : response->status.sc = SPDK_NVME_SC_SUCCESS;
708 0 : } else {
709 0 : unmap_ctx->count--; /* dequeued */
710 : }
711 :
712 0 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
713 :
714 0 : for (i = unmap_ctx->range_index; i < nr; i++) {
715 0 : struct spdk_nvme_dsm_range dsm_range = { 0 };
716 :
717 0 : spdk_iov_xfer_to_buf(&ix, &dsm_range, sizeof(dsm_range));
718 :
719 0 : lba = dsm_range.starting_lba;
720 0 : lba_count = dsm_range.length;
721 0 : if (max_discard_size > 0 && lba_count > (max_discard_size << 10) / block_size) {
722 0 : SPDK_ERRLOG("invalid unmap size %" PRIu32 " blocks, should not exceed %" PRIu64 " blocks\n",
723 : lba_count, max_discard_size << 1);
724 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
725 0 : response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
726 0 : break;
727 : }
728 :
729 0 : unmap_ctx->count++;
730 :
731 0 : rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
732 0 : nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
733 0 : if (rc) {
734 0 : if (rc == -ENOMEM) {
735 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
736 : /* Unmap was not yet submitted to bdev */
737 : /* unmap_ctx->count will be decremented when the request is dequeued */
738 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
739 : }
740 0 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
741 0 : unmap_ctx->count--;
742 : /* We can't return here - we may have to wait for any other
743 : * unmaps already sent to complete */
744 0 : break;
745 : }
746 0 : unmap_ctx->range_index++;
747 0 : }
748 :
749 0 : if (unmap_ctx->count == 0) {
750 0 : free(unmap_ctx);
751 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
752 : }
753 :
754 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
755 0 : }
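
Each Dataset Management range descriptor is a fixed-size struct spdk_nvme_dsm_range, and NR in CDW10 is 0-based, so the length check at the top of the function is simple arithmetic against the request's SGL length. A small sketch with assumed values:

#include <assert.h>
#include <stdint.h>
#include "spdk/nvme_spec.h"

static void
dsm_length_example(void)
{
	uint16_t nr = 255 + 1;		/* CDW10 NR is 0-based; 255 encodes 256 ranges */
	uint32_t req_length = 4096;	/* assumed SGL length of the request */

	/* 256 ranges * 16 bytes == 4096 bytes, so this request is exactly large enough. */
	assert(nr * sizeof(struct spdk_nvme_dsm_range) <= req_length);
}
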
756 :
757 : int
758 0 : nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
759 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
760 : {
761 0 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
762 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
763 :
764 0 : if (cmd->cdw11_bits.dsm.ad) {
765 0 : return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
766 : }
767 :
768 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
769 0 : response->status.sc = SPDK_NVME_SC_SUCCESS;
770 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
771 0 : }
772 :
773 : int
774 5 : nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
775 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
776 : {
777 5 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
778 5 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
779 5 : uint64_t sdlba = ((uint64_t)cmd->cdw11 << 32) + cmd->cdw10;
780 5 : struct spdk_nvme_scc_source_range range = { 0 };
781 : struct spdk_iov_xfer ix;
782 : int rc;
783 :
784 5 : SPDK_DEBUGLOG(nvmf, "Copy command: SDLBA %lu, NR %u, desc format %u, PRINFOR %u, "
785 : "DTYPE %u, STCW %u, PRINFOW %u, FUA %u, LR %u\n",
786 : sdlba,
787 : cmd->cdw12_bits.copy.nr,
788 : cmd->cdw12_bits.copy.df,
789 : cmd->cdw12_bits.copy.prinfor,
790 : cmd->cdw12_bits.copy.dtype,
791 : cmd->cdw12_bits.copy.stcw,
792 : cmd->cdw12_bits.copy.prinfow,
793 : cmd->cdw12_bits.copy.fua,
794 : cmd->cdw12_bits.copy.lr);
795 :
796 5 : if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
797 : sizeof(struct spdk_nvme_scc_source_range))) {
798 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
799 0 : response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
800 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
801 : }
802 :
803 : /*
804 : * We support only one source range; the single buffer transfer
805 : * below relies on this.
806 : */
807 5 : if (cmd->cdw12_bits.copy.nr > 0) {
808 1 : response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
809 1 : response->status.sc = SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED;
810 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
811 : }
812 :
813 4 : if (cmd->cdw12_bits.copy.df != 0) {
814 1 : response->status.sct = SPDK_NVME_SCT_GENERIC;
815 1 : response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
816 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
817 : }
818 :
819 3 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
820 3 : spdk_iov_xfer_to_buf(&ix, &range, sizeof(range));
821 :
822 6 : rc = spdk_bdev_copy_blocks(desc, ch, sdlba, range.slba, range.nlb + 1,
823 3 : nvmf_bdev_ctrlr_complete_cmd, req);
824 3 : if (spdk_unlikely(rc)) {
825 1 : if (rc == -ENOMEM) {
826 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
827 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
828 : }
829 :
830 1 : response->status.sct = SPDK_NVME_SCT_GENERIC;
831 1 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
832 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
833 : }
834 :
835 2 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
836 5 : }
837 :
838 : int
839 4 : nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
840 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
841 : {
842 : int rc;
843 :
844 8 : rc = spdk_bdev_nvme_iov_passthru_md(desc, ch, &req->cmd->nvme_cmd, req->iov, req->iovcnt,
845 4 : req->length, NULL, 0, nvmf_bdev_ctrlr_complete_cmd, req);
846 :
847 4 : if (spdk_unlikely(rc)) {
848 2 : if (rc == -ENOMEM) {
849 1 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
850 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
851 : }
852 1 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
853 1 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
854 1 : req->rsp->nvme_cpl.status.dnr = 1;
855 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
856 : }
857 :
858 2 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
859 4 : }
860 :
861 : int
862 4 : spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
863 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
864 : spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
865 : {
866 : int rc;
867 :
868 4 : if (spdk_unlikely(req->iovcnt > 1)) {
869 0 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
870 0 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
871 0 : req->rsp->nvme_cpl.status.dnr = 1;
872 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
873 : }
874 :
875 4 : req->cmd_cb_fn = cb_fn;
876 :
877 8 : rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
878 4 : nvmf_bdev_ctrlr_complete_admin_cmd, req);
879 4 : if (spdk_unlikely(rc)) {
880 2 : if (rc == -ENOMEM) {
881 1 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
882 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
883 : }
884 1 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
885 1 : if (rc == -ENOTSUP) {
886 1 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
887 1 : } else {
888 0 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
889 : }
890 :
891 1 : req->rsp->nvme_cpl.status.dnr = 1;
892 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
893 : }
894 :
895 2 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
896 4 : }
897 :
898 : static void
899 0 : nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
900 : {
901 0 : struct spdk_nvmf_request *req = cb_arg;
902 :
903 0 : if (success) {
904 0 : req->rsp->nvme_cpl.cdw0 &= ~1U;
905 0 : }
906 :
907 0 : spdk_nvmf_request_complete(req);
908 0 : spdk_bdev_free_io(bdev_io);
909 0 : }
910 :
911 : int
912 0 : spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
913 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
914 : struct spdk_nvmf_request *req_to_abort)
915 : {
916 : int rc;
917 :
918 0 : assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);
919 :
920 0 : rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
921 0 : if (spdk_likely(rc == 0)) {
922 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
923 0 : } else if (rc == -ENOMEM) {
924 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
925 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
926 : } else {
927 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
928 : }
929 0 : }
930 :
931 : bool
932 2 : nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev_desc *desc, struct spdk_nvme_cmd *cmd,
933 : struct spdk_dif_ctx *dif_ctx)
934 : {
935 2 : uint32_t init_ref_tag, dif_check_flags = 0;
936 : int rc;
937 : struct spdk_dif_ctx_init_ext_opts dif_opts;
938 :
939 2 : if (spdk_bdev_desc_get_md_size(desc) == 0) {
940 1 : return false;
941 : }
942 :
943 : /* Initial Reference Tag is the lower 32 bits of the start LBA. */
944 1 : init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);
945 :
946 1 : if (spdk_bdev_desc_is_dif_check_enabled(desc, SPDK_DIF_CHECK_TYPE_REFTAG)) {
947 0 : dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
948 0 : }
949 :
950 1 : if (spdk_bdev_desc_is_dif_check_enabled(desc, SPDK_DIF_CHECK_TYPE_GUARD)) {
951 0 : dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
952 0 : }
953 :
954 1 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
955 1 : dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
956 2 : rc = spdk_dif_ctx_init(dif_ctx,
957 1 : spdk_bdev_desc_get_block_size(desc),
958 1 : spdk_bdev_desc_get_md_size(desc),
959 1 : spdk_bdev_desc_is_md_interleaved(desc),
960 1 : spdk_bdev_desc_is_dif_head_of_md(desc),
961 1 : spdk_bdev_desc_get_dif_type(desc),
962 1 : dif_check_flags,
963 1 : init_ref_tag, 0, 0, 0, 0, &dif_opts);
964 :
965 1 : return (rc == 0) ? true : false;
966 2 : }
967 :
968 : static void
969 0 : nvmf_bdev_ctrlr_zcopy_start_complete(struct spdk_bdev_io *bdev_io, bool success,
970 : void *cb_arg)
971 : {
972 0 : struct spdk_nvmf_request *req = cb_arg;
973 : struct iovec *iov;
974 0 : int iovcnt = 0;
975 :
976 0 : if (spdk_unlikely(!success)) {
977 0 : int sc = 0, sct = 0;
978 0 : uint32_t cdw0 = 0;
979 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
980 0 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
981 :
982 0 : response->cdw0 = cdw0;
983 0 : response->status.sc = sc;
984 0 : response->status.sct = sct;
985 :
986 0 : spdk_bdev_free_io(bdev_io);
987 0 : spdk_nvmf_request_complete(req);
988 0 : return;
989 : }
990 :
991 0 : spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);
992 :
993 0 : assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);
994 0 : assert(iovcnt > 0);
995 :
996 0 : req->iovcnt = iovcnt;
997 :
998 0 : assert(req->iov == iov);
999 :
1000 0 : req->zcopy_bdev_io = bdev_io; /* Preserve the bdev_io for the end zcopy */
1001 :
1002 0 : spdk_nvmf_request_complete(req);
1003 : /* Don't free the bdev_io here as it is needed for the END ZCOPY */
1004 0 : }
1005 :
1006 : int
1007 3 : nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
1008 : struct spdk_bdev_desc *desc,
1009 : struct spdk_io_channel *ch,
1010 : struct spdk_nvmf_request *req)
1011 : {
1012 3 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1013 3 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
1014 3 : uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
1015 : uint64_t start_lba;
1016 : uint64_t num_blocks;
1017 : int rc;
1018 :
1019 3 : nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);
1020 :
1021 3 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
1022 1 : SPDK_ERRLOG("end of media\n");
1023 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1024 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
1025 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1026 : }
1027 :
1028 2 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
1029 1 : SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
1030 : num_blocks, block_size, req->length);
1031 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1032 1 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1033 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1034 : }
1035 :
1036 1 : bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;
1037 :
1038 2 : rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
1039 1 : num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
1040 1 : if (spdk_unlikely(rc != 0)) {
1041 0 : if (rc == -ENOMEM) {
1042 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
1043 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
1044 : }
1045 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1046 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1047 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1048 : }
1049 :
1050 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
1051 3 : }
1052 :
1053 : static void
1054 0 : nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
1055 : void *cb_arg)
1056 : {
1057 0 : struct spdk_nvmf_request *req = cb_arg;
1058 :
1059 0 : if (spdk_unlikely(!success)) {
1060 0 : int sc = 0, sct = 0;
1061 0 : uint32_t cdw0 = 0;
1062 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1063 0 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
1064 :
1065 0 : response->cdw0 = cdw0;
1066 0 : response->status.sc = sc;
1067 0 : response->status.sct = sct;
1068 0 : }
1069 :
1070 0 : spdk_bdev_free_io(bdev_io);
1071 0 : req->zcopy_bdev_io = NULL;
1072 0 : spdk_nvmf_request_complete(req);
1073 0 : }
1074 :
1075 : void
1076 0 : nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
1077 : {
1078 : int rc __attribute__((unused));
1079 :
1080 0 : rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);
1081 :
1082 : /* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
1083 0 : assert(rc == 0);
1084 0 : }
|