Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2017 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "nvmf_internal.h"
10 :
11 : #include "spdk/bdev.h"
12 : #include "spdk/endian.h"
13 : #include "spdk/thread.h"
14 : #include "spdk/likely.h"
15 : #include "spdk/nvme.h"
16 : #include "spdk/nvmf_cmd.h"
17 : #include "spdk/nvmf_spec.h"
18 : #include "spdk/trace.h"
19 : #include "spdk/scsi_spec.h"
20 : #include "spdk/string.h"
21 : #include "spdk/util.h"
22 :
23 : #include "spdk/log.h"
24 :
25 : static bool
26 0 : nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
27 : enum spdk_bdev_io_type io_type)
28 : {
29 : struct spdk_nvmf_ns *ns;
30 :
31 0 : for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
32 0 : ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
33 0 : if (ns->bdev == NULL) {
34 0 : continue;
35 : }
36 :
37 0 : if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
38 0 : SPDK_DEBUGLOG(nvmf,
39 : "Subsystem %s namespace %u (%s) does not support io_type %d\n",
40 : spdk_nvmf_subsystem_get_nqn(subsystem),
41 : ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
42 0 : return false;
43 : }
44 : }
45 :
46 0 : SPDK_DEBUGLOG(nvmf, "All devices in Subsystem %s support io_type %d\n",
47 : spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
48 0 : return true;
49 : }
50 :
51 : bool
52 0 : nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
53 : {
54 0 : return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
55 : }
56 :
57 : bool
58 0 : nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
59 : {
60 0 : return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
61 : }
62 :
63 : bool
64 0 : nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr)
65 : {
66 0 : return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_COPY);
67 : }
68 :
69 : static void
70 4 : nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
71 : void *cb_arg)
72 : {
73 4 : struct spdk_nvmf_request *req = cb_arg;
74 4 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
75 4 : int sc = 0, sct = 0;
76 4 : uint32_t cdw0 = 0;
77 :
78 4 : if (spdk_unlikely(req->first_fused)) {
79 0 : struct spdk_nvmf_request *first_req = req->first_fused_req;
80 0 : struct spdk_nvme_cpl *first_response = &first_req->rsp->nvme_cpl;
81 0 : int first_sc = 0, first_sct = 0;
82 :
83 : /* get status for both operations */
84 0 : spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &first_sct, &first_sc, &sct, &sc);
85 0 : first_response->cdw0 = cdw0;
86 0 : first_response->status.sc = first_sc;
87 0 : first_response->status.sct = first_sct;
88 :
89 : /* the first command of the fused pair must be completed as well */
90 0 : spdk_nvmf_request_complete(first_req);
91 0 : req->first_fused_req = NULL;
92 0 : req->first_fused = false;
93 : } else {
94 4 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
95 : }
96 :
97 4 : response->cdw0 = cdw0;
98 4 : response->status.sc = sc;
99 4 : response->status.sct = sct;
100 :
101 4 : spdk_nvmf_request_complete(req);
102 4 : spdk_bdev_free_io(bdev_io);
103 4 : }
104 :
105 : static void
106 2 : nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
107 : void *cb_arg)
108 : {
109 2 : struct spdk_nvmf_request *req = cb_arg;
110 :
111 2 : if (req->cmd_cb_fn) {
112 0 : req->cmd_cb_fn(req);
113 : }
114 :
115 2 : nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
116 2 : }
117 :
118 : void
119 2 : nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
120 : bool dif_insert_or_strip)
121 : {
122 2 : struct spdk_bdev *bdev = ns->bdev;
123 : uint64_t num_blocks;
124 : uint32_t phys_blocklen;
125 : uint32_t max_copy;
126 :
127 2 : num_blocks = spdk_bdev_get_num_blocks(bdev);
128 :
129 2 : nsdata->nsze = num_blocks;
130 2 : nsdata->ncap = num_blocks;
131 2 : nsdata->nuse = num_blocks;
132 2 : nsdata->nlbaf = 0;
133 2 : nsdata->flbas.format = 0;
134 2 : nsdata->flbas.msb_format = 0;
135 2 : nsdata->nacwu = spdk_bdev_get_acwu(bdev) - 1; /* nacwu is 0-based */
136 2 : if (!dif_insert_or_strip) {
137 1 : nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
138 1 : nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
139 1 : if (nsdata->lbaf[0].ms != 0) {
140 1 : nsdata->flbas.extended = 1;
141 1 : nsdata->mc.extended = 1;
142 1 : nsdata->mc.pointer = 0;
143 1 : nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);
144 : /* The NVMf library doesn't process the PRACT and PRCHK flags; we
145 : * leave the use of the extended LBA buffer to users.
146 : */
147 1 : nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
148 : }
149 : } else {
150 1 : nsdata->lbaf[0].ms = 0;
151 1 : nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
152 : }
153 :
154 2 : phys_blocklen = spdk_bdev_get_physical_block_size(bdev);
155 2 : assert(phys_blocklen > 0);
156 : /* Linux driver uses min(nawupf, npwg) to set physical_block_size */
157 2 : nsdata->nsfeat.optperf = 1;
158 2 : nsdata->nsfeat.ns_atomic_write_unit = 1;
159 2 : nsdata->npwg = (phys_blocklen >> nsdata->lbaf[0].lbads) - 1;
160 2 : nsdata->nawupf = nsdata->npwg;
161 2 : nsdata->npwa = nsdata->npwg;
162 2 : nsdata->npdg = nsdata->npwg;
163 2 : nsdata->npda = nsdata->npwg;
164 :
165 2 : if (spdk_bdev_get_write_unit_size(bdev) == 1) {
166 2 : nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
167 : }
168 2 : nsdata->nmic.can_share = 1;
169 2 : if (nvmf_ns_is_ptpl_capable(ns)) {
170 2 : nsdata->nsrescap.rescap.persist = 1;
171 : }
172 2 : nsdata->nsrescap.rescap.write_exclusive = 1;
173 2 : nsdata->nsrescap.rescap.exclusive_access = 1;
174 2 : nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
175 2 : nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
176 2 : nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
177 2 : nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
178 2 : nsdata->nsrescap.rescap.ignore_existing_key = 1;
179 :
180 : SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
181 2 : memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));
182 :
183 : SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
184 2 : memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
185 :
186 : /* For now we support just one source range for copy command */
187 2 : nsdata->msrc = 0;
188 :
189 2 : max_copy = spdk_bdev_get_max_copy(bdev);
190 2 : if (max_copy == 0 || max_copy > UINT16_MAX) {
191 : /* Zero means copy size is unlimited */
192 2 : nsdata->mcl = UINT16_MAX;
193 2 : nsdata->mssrl = UINT16_MAX;
194 : } else {
195 0 : nsdata->mcl = max_copy;
196 0 : nsdata->mssrl = max_copy;
197 : }
198 2 : }
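
A quick worked example of the NPWG/NAWUPF math above (illustrative numbers, not taken from the source): with a 4096-byte physical block size and 512-byte logical blocks (lbads = 9), npwg = (4096 >> 9) - 1 = 7, i.e. a preferred write granularity of 8 logical blocks reported 0-based; nawupf, npwa, npdg and npda are then set to the same value.
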
199 :
200 : void
201 0 : nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
202 : struct spdk_nvme_nvm_ns_data *nsdata_nvm)
203 : {
204 0 : struct spdk_bdev *bdev = ns->bdev;
205 : uint8_t _16bpists;
206 : uint32_t sts, pif;
207 :
208 0 : if (spdk_bdev_get_dif_type(bdev) == SPDK_DIF_DISABLE) {
209 0 : return;
210 : }
211 :
212 0 : pif = spdk_bdev_get_dif_pi_format(bdev);
213 :
214 : /*
215 : * 16BPISTS shall be 1 for 32/64b Guard PI.
216 : * STCRS shall be 1 if 16BPISTS is 1.
217 : * 16 is the minimum value of STS for 32b Guard PI.
218 : */
219 0 : switch (pif) {
220 0 : case SPDK_DIF_PI_FORMAT_16:
221 0 : _16bpists = 0;
222 0 : sts = 0;
223 0 : break;
224 0 : case SPDK_DIF_PI_FORMAT_32:
225 0 : _16bpists = 1;
226 0 : sts = 16;
227 0 : break;
228 0 : case SPDK_DIF_PI_FORMAT_64:
229 0 : _16bpists = 1;
230 0 : sts = 0;
231 0 : break;
232 0 : default:
233 0 : SPDK_WARNLOG("PI format %u is not supported\n", pif);
234 0 : return;
235 : }
236 :
237 : /* For 16b Guard PI, Storage Tag is not available because we set STS to 0.
238 : * In this case, we do not have to set 16BPISTM to 1. For simplicity,
239 : * set 16BPISTM to 0 and set LBSTM to all zeroes.
240 : *
241 : * We will revisit this if we find an OS that uses Storage Tag.
242 : */
243 0 : nsdata_nvm->lbstm = 0;
244 0 : nsdata_nvm->pic._16bpistm = 0;
245 :
246 0 : nsdata_nvm->pic._16bpists = _16bpists;
247 0 : nsdata_nvm->pic.stcrs = 0;
248 0 : nsdata_nvm->elbaf[0].sts = sts;
249 0 : nsdata_nvm->elbaf[0].pif = pif;
250 : }
251 :
252 : static void
253 22 : nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
254 : uint64_t *num_blocks)
255 : {
256 : /* SLBA: CDW10 and CDW11 */
257 22 : *start_lba = from_le64(&cmd->cdw10);
258 :
259 : /* NLB: CDW12 bits 15:00, 0's based */
260 22 : *num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
261 22 : }
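
A minimal sketch of the CDW decoding above, written unit-test style (hypothetical: it assumes the static helper is reachable, e.g. from a test that #includes this file, and that assert() and the endian helpers from spdk/endian.h are available). SLBA spans CDW10 (low 32 bits) and CDW11 (high 32 bits); NLB in CDW12 bits 15:00 is 0-based.

/* Hypothetical example, not part of the driver. */
static void
example_rw_params_decode(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t slba, nlb;

	to_le64(&cmd.cdw10, 0x1000);	/* SLBA 0x1000 written across CDW10/CDW11 */
	to_le32(&cmd.cdw12, 8 - 1);	/* NLB is 0-based, so 7 means 8 blocks */

	nvmf_bdev_ctrlr_get_rw_params(&cmd, &slba, &nlb);
	assert(slba == 0x1000 && nlb == 8);
}
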
262 :
263 : static void
264 2 : nvmf_bdev_ctrlr_get_rw_ext_params(const struct spdk_nvme_cmd *cmd,
265 : struct spdk_bdev_ext_io_opts *opts)
266 : {
267 : /* Get CDW12 values */
268 2 : opts->nvme_cdw12.raw = from_le32(&cmd->cdw12);
269 :
270 : /* Get CDW13 values */
271 2 : opts->nvme_cdw13.raw = from_le32(&cmd->cdw13);
272 2 : }
273 :
274 : static bool
275 27 : nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
276 : uint64_t io_num_blocks)
277 : {
278 27 : if (io_start_lba + io_num_blocks > bdev_num_blocks ||
279 19 : io_start_lba + io_num_blocks < io_start_lba) {
280 10 : return false;
281 : }
282 :
283 17 : return true;
284 : }
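
A few illustrative calls to the range check above (hypothetical values, unit-test style under the same assumptions as the previous sketch): on a 100-block namespace the whole range is valid, 90 + 16 runs past the end, and a start LBA near UINT64_MAX makes the sum wrap around, which the second condition catches.

/* Hypothetical checks, not part of the driver. */
assert(nvmf_bdev_ctrlr_lba_in_range(100, 0, 100));		/* exactly the whole namespace: in range */
assert(!nvmf_bdev_ctrlr_lba_in_range(100, 90, 16));		/* 90 + 16 > 100: out of range */
assert(!nvmf_bdev_ctrlr_lba_in_range(100, UINT64_MAX, 2));	/* sum wraps below the start LBA: rejected */
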
285 :
286 : static void
287 0 : nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
288 : {
289 0 : struct spdk_nvmf_request *req = arg;
290 : int rc;
291 :
292 0 : rc = nvmf_ctrlr_process_io_cmd(req);
293 0 : if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
294 0 : spdk_nvmf_request_complete(req);
295 : }
296 0 : }
297 :
298 : static void
299 0 : nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
300 : {
301 0 : struct spdk_nvmf_request *req = arg;
302 : int rc;
303 :
304 0 : rc = nvmf_ctrlr_process_admin_cmd(req);
305 0 : if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
306 0 : spdk_nvmf_request_complete(req);
307 : }
308 0 : }
309 :
310 : static void
311 2 : nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
312 : struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
313 : {
314 : int rc;
315 :
316 2 : req->bdev_io_wait.bdev = bdev;
317 2 : req->bdev_io_wait.cb_fn = cb_fn;
318 2 : req->bdev_io_wait.cb_arg = cb_arg;
319 :
320 2 : rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
321 2 : if (rc != 0) {
322 0 : assert(false);
323 : }
324 2 : req->qpair->group->stat.pending_bdev_io++;
325 2 : }
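
Every submit path below follows the same retry pattern: when a bdev call fails with -ENOMEM (no spdk_bdev_io is available at the moment), the request is parked through this helper with spdk_bdev_queue_io_wait() and re-driven later by nvmf_ctrlr_process_io_cmd_resubmit() or nvmf_ctrlr_process_admin_cmd_resubmit(); any other error completes the request immediately with an NVMe error status.
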
326 :
327 : bool
328 0 : nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
329 : {
330 0 : return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
331 : }
332 :
333 : int
334 1 : nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
335 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
336 : {
337 1 : struct spdk_bdev_ext_io_opts opts = {
338 : .size = SPDK_SIZEOF(&opts, accel_sequence),
339 1 : .memory_domain = req->memory_domain,
340 1 : .memory_domain_ctx = req->memory_domain_ctx,
341 1 : .accel_sequence = req->accel_sequence,
342 : };
343 1 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
344 1 : uint32_t block_size = spdk_bdev_get_block_size(bdev);
345 1 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
346 1 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
347 1 : uint64_t start_lba;
348 1 : uint64_t num_blocks;
349 : int rc;
350 :
351 1 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
352 :
353 1 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
354 0 : SPDK_ERRLOG("end of media\n");
355 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
356 0 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
357 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
358 : }
359 :
360 1 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
361 0 : SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
362 : num_blocks, block_size, req->length);
363 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
364 0 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
365 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
366 : }
367 :
368 1 : assert(!spdk_nvmf_request_using_zcopy(req));
369 :
370 1 : rc = spdk_bdev_readv_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
371 : nvmf_bdev_ctrlr_complete_cmd, req, &opts);
372 1 : if (spdk_unlikely(rc)) {
373 0 : if (rc == -ENOMEM) {
374 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
375 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
376 : }
377 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
378 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
379 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
380 : }
381 :
382 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
383 : }
384 :
385 : int
386 1 : nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
387 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
388 : {
389 1 : struct spdk_bdev_ext_io_opts opts = {
390 : .size = SPDK_SIZEOF(&opts, nvme_cdw13),
391 1 : .memory_domain = req->memory_domain,
392 1 : .memory_domain_ctx = req->memory_domain_ctx,
393 1 : .accel_sequence = req->accel_sequence,
394 : };
395 1 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
396 1 : uint32_t block_size = spdk_bdev_get_block_size(bdev);
397 1 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
398 1 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
399 1 : uint64_t start_lba;
400 1 : uint64_t num_blocks;
401 : int rc;
402 :
403 1 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
404 1 : nvmf_bdev_ctrlr_get_rw_ext_params(cmd, &opts);
405 :
406 1 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
407 0 : SPDK_ERRLOG("end of media\n");
408 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
409 0 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
410 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
411 : }
412 :
413 1 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
414 0 : SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
415 : num_blocks, block_size, req->length);
416 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
417 0 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
418 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
419 : }
420 :
421 1 : assert(!spdk_nvmf_request_using_zcopy(req));
422 :
423 1 : rc = spdk_bdev_writev_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
424 : nvmf_bdev_ctrlr_complete_cmd, req, &opts);
425 1 : if (spdk_unlikely(rc)) {
426 0 : if (rc == -ENOMEM) {
427 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
428 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
429 : }
430 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
431 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
432 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
433 : }
434 :
435 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
436 : }
437 :
438 : int
439 4 : nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
440 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
441 : {
442 4 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
443 4 : uint32_t block_size = spdk_bdev_get_block_size(bdev);
444 4 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
445 4 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
446 4 : uint64_t start_lba;
447 4 : uint64_t num_blocks;
448 : int rc;
449 :
450 4 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
451 :
452 4 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
453 1 : SPDK_ERRLOG("end of media\n");
454 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
455 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
456 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
457 : }
458 :
459 3 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
460 1 : SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
461 : num_blocks, block_size, req->length);
462 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
463 1 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
464 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
465 : }
466 :
467 2 : rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
468 : nvmf_bdev_ctrlr_complete_cmd, req);
469 2 : if (spdk_unlikely(rc)) {
470 1 : if (rc == -ENOMEM) {
471 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
472 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
473 : }
474 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
475 1 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
476 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
477 : }
478 :
479 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
480 : }
481 :
482 : int
483 4 : nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
484 : struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req)
485 : {
486 4 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
487 4 : uint32_t block_size = spdk_bdev_get_block_size(bdev);
488 4 : struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
489 4 : struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
490 4 : struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
491 4 : uint64_t write_start_lba, cmp_start_lba;
492 4 : uint64_t write_num_blocks, cmp_num_blocks;
493 : int rc;
494 :
495 4 : nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
496 4 : nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);
497 :
498 4 : if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
499 1 : SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
500 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
501 1 : rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
502 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
503 : }
504 :
505 3 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
506 : write_num_blocks))) {
507 1 : SPDK_ERRLOG("end of media\n");
508 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
509 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
510 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
511 : }
512 :
513 2 : if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
514 1 : SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
515 : write_num_blocks, block_size, write_req->length);
516 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
517 1 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
518 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
519 : }
520 :
521 1 : rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt, write_req->iov,
522 1 : write_req->iovcnt, write_start_lba, write_num_blocks, nvmf_bdev_ctrlr_complete_cmd, write_req);
523 1 : if (spdk_unlikely(rc)) {
524 0 : if (rc == -ENOMEM) {
525 0 : nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
526 0 : nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
527 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
528 : }
529 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
530 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
531 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
532 : }
533 :
534 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
535 : }
536 :
537 : int
538 4 : nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
539 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
540 : {
541 4 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
542 4 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
543 4 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
544 4 : uint64_t max_write_zeroes_size = req->qpair->ctrlr->subsys->max_write_zeroes_size_kib;
545 4 : uint64_t start_lba;
546 4 : uint64_t num_blocks;
547 : int rc;
548 :
549 4 : nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
550 4 : if (spdk_unlikely(max_write_zeroes_size > 0 &&
551 : num_blocks > (max_write_zeroes_size << 10) / spdk_bdev_get_block_size(bdev))) {
552 1 : SPDK_ERRLOG("invalid write zeroes size, should not exceed %" PRIu64 " KiB\n", max_write_zeroes_size);
553 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
554 1 : rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
555 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
556 : }
557 :
558 3 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
559 1 : SPDK_ERRLOG("end of media\n");
560 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
561 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
562 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
563 : }
564 :
565 2 : if (spdk_unlikely(cmd->cdw12_bits.write_zeroes.deac)) {
566 0 : SPDK_ERRLOG("Write Zeroes Deallocate is not supported\n");
567 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
568 0 : rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
569 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
570 : }
571 :
572 2 : rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
573 : nvmf_bdev_ctrlr_complete_cmd, req);
574 2 : if (spdk_unlikely(rc)) {
575 1 : if (rc == -ENOMEM) {
576 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
577 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
578 : }
579 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
580 1 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
581 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
582 : }
583 :
584 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
585 : }
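
For the size guard at the top of nvmf_bdev_ctrlr_write_zeroes_cmd(), the subsystem's KiB limit is converted to blocks as (max_write_zeroes_size << 10) / block_size; with illustrative numbers, a 64 KiB limit and 512-byte blocks allow at most 128 blocks per Write Zeroes command.
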
586 :
587 : int
588 3 : nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
589 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
590 : {
591 3 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
592 : int rc;
593 :
594 : /* For an NVMe-oF controller, SPDK always sets the volatile write
595 : * cache bit to 1, so return success for block devices that
596 : * don't support the FLUSH command.
597 : */
598 3 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
599 1 : response->status.sct = SPDK_NVME_SCT_GENERIC;
600 1 : response->status.sc = SPDK_NVME_SC_SUCCESS;
601 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
602 : }
603 :
604 2 : rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
605 : nvmf_bdev_ctrlr_complete_cmd, req);
606 2 : if (spdk_unlikely(rc)) {
607 1 : if (rc == -ENOMEM) {
608 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
609 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
610 : }
611 1 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
612 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
613 : }
614 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
615 : }
616 :
617 : struct nvmf_bdev_ctrlr_unmap {
618 : struct spdk_nvmf_request *req;
619 : uint32_t count;
620 : struct spdk_bdev_desc *desc;
621 : struct spdk_bdev *bdev;
622 : struct spdk_io_channel *ch;
623 : uint32_t range_index;
624 : };
625 :
626 : static void
627 0 : nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
628 : void *cb_arg)
629 : {
630 0 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
631 0 : struct spdk_nvmf_request *req = unmap_ctx->req;
632 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
633 0 : int sc, sct;
634 0 : uint32_t cdw0;
635 :
636 0 : unmap_ctx->count--;
637 :
638 0 : if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
639 0 : response->status.sc == SPDK_NVME_SC_SUCCESS) {
640 0 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
641 0 : response->cdw0 = cdw0;
642 0 : response->status.sc = sc;
643 0 : response->status.sct = sct;
644 : }
645 :
646 0 : if (unmap_ctx->count == 0) {
647 0 : spdk_nvmf_request_complete(req);
648 0 : free(unmap_ctx);
649 : }
650 0 : spdk_bdev_free_io(bdev_io);
651 0 : }
652 :
653 : static int nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
654 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
655 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx);
656 : static void
657 0 : nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
658 : {
659 0 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
660 0 : struct spdk_nvmf_request *req = unmap_ctx->req;
661 0 : struct spdk_bdev_desc *desc = unmap_ctx->desc;
662 0 : struct spdk_bdev *bdev = unmap_ctx->bdev;
663 0 : struct spdk_io_channel *ch = unmap_ctx->ch;
664 :
665 0 : nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
666 0 : }
667 :
668 : static int
669 0 : nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
670 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
671 : struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
672 : {
673 : uint16_t nr, i;
674 0 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
675 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
676 0 : uint64_t max_discard_size = req->qpair->ctrlr->subsys->max_discard_size_kib;
677 0 : uint32_t block_size = spdk_bdev_get_block_size(bdev);
678 0 : struct spdk_iov_xfer ix;
679 : uint64_t lba;
680 : uint32_t lba_count;
681 : int rc;
682 :
683 0 : nr = cmd->cdw10_bits.dsm.nr + 1;
684 0 : if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
685 0 : SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
686 0 : response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
687 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
688 : }
689 :
690 0 : if (unmap_ctx == NULL) {
691 0 : unmap_ctx = calloc(1, sizeof(*unmap_ctx));
692 0 : if (!unmap_ctx) {
693 0 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
694 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
695 : }
696 :
697 0 : unmap_ctx->req = req;
698 0 : unmap_ctx->desc = desc;
699 0 : unmap_ctx->ch = ch;
700 0 : unmap_ctx->bdev = bdev;
701 :
702 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
703 0 : response->status.sc = SPDK_NVME_SC_SUCCESS;
704 : } else {
705 0 : unmap_ctx->count--; /* dequeued */
706 : }
707 :
708 0 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
709 :
710 0 : for (i = unmap_ctx->range_index; i < nr; i++) {
711 0 : struct spdk_nvme_dsm_range dsm_range = { 0 };
712 :
713 0 : spdk_iov_xfer_to_buf(&ix, &dsm_range, sizeof(dsm_range));
714 :
715 0 : lba = dsm_range.starting_lba;
716 0 : lba_count = dsm_range.length;
717 0 : if (max_discard_size > 0 && lba_count > (max_discard_size << 10) / block_size) {
718 0 : SPDK_ERRLOG("invalid unmap size %" PRIu32 " blocks, should not exceed %" PRIu64 " blocks\n",
719 : lba_count, (max_discard_size << 10) / block_size);
720 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
721 0 : response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
722 0 : break;
723 : }
724 :
725 0 : unmap_ctx->count++;
726 :
727 0 : rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
728 : nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
729 0 : if (rc) {
730 0 : if (rc == -ENOMEM) {
731 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
732 : /* Unmap was not yet submitted to bdev */
733 : /* unmap_ctx->count will be decremented when the request is dequeued */
734 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
735 : }
736 0 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
737 0 : unmap_ctx->count--;
738 : /* We can't return here - we may have to wait for any other
739 : * unmaps already sent to complete */
740 0 : break;
741 : }
742 0 : unmap_ctx->range_index++;
743 : }
744 :
745 0 : if (unmap_ctx->count == 0) {
746 0 : free(unmap_ctx);
747 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
748 : }
749 :
750 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
751 : }
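
Note on the -ENOMEM path above: the request is re-queued with the existing unmap_ctx, and range_index still points at the range that failed to submit; when nvmf_bdev_ctrlr_unmap_resubmit() re-enters this function, the "dequeued" decrement cancels the count increment made for that failed submission and the loop resumes at the same range.
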
752 :
753 : int
754 0 : nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
755 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
756 : {
757 0 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
758 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
759 :
760 0 : if (cmd->cdw11_bits.dsm.ad) {
761 0 : return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
762 : }
763 :
764 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
765 0 : response->status.sc = SPDK_NVME_SC_SUCCESS;
766 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
767 : }
768 :
769 : int
770 5 : nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
771 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
772 : {
773 5 : struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
774 5 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
775 5 : uint64_t sdlba = ((uint64_t)cmd->cdw11 << 32) + cmd->cdw10;
776 5 : struct spdk_nvme_scc_source_range range = { 0 };
777 5 : struct spdk_iov_xfer ix;
778 : int rc;
779 :
780 5 : SPDK_DEBUGLOG(nvmf, "Copy command: SDLBA %lu, NR %u, desc format %u, PRINFOR %u, "
781 : "DTYPE %u, STCW %u, PRINFOW %u, FUA %u, LR %u\n",
782 : sdlba,
783 : cmd->cdw12_bits.copy.nr,
784 : cmd->cdw12_bits.copy.df,
785 : cmd->cdw12_bits.copy.prinfor,
786 : cmd->cdw12_bits.copy.dtype,
787 : cmd->cdw12_bits.copy.stcw,
788 : cmd->cdw12_bits.copy.prinfow,
789 : cmd->cdw12_bits.copy.fua,
790 : cmd->cdw12_bits.copy.lr);
791 :
792 5 : if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
793 : sizeof(struct spdk_nvme_scc_source_range))) {
794 0 : response->status.sct = SPDK_NVME_SCT_GENERIC;
795 0 : response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
796 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
797 : }
798 :
799 : /*
800 : * We support only one source range, and rely on this with the xfer
801 : * below.
802 : */
803 5 : if (cmd->cdw12_bits.copy.nr > 0) {
804 1 : response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
805 1 : response->status.sc = SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED;
806 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
807 : }
808 :
809 4 : if (cmd->cdw12_bits.copy.df != 0) {
810 1 : response->status.sct = SPDK_NVME_SCT_GENERIC;
811 1 : response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
812 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
813 : }
814 :
815 3 : spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
816 3 : spdk_iov_xfer_to_buf(&ix, &range, sizeof(range));
817 :
818 3 : rc = spdk_bdev_copy_blocks(desc, ch, sdlba, range.slba, range.nlb + 1,
819 : nvmf_bdev_ctrlr_complete_cmd, req);
820 3 : if (spdk_unlikely(rc)) {
821 1 : if (rc == -ENOMEM) {
822 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
823 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
824 : }
825 :
826 1 : response->status.sct = SPDK_NVME_SCT_GENERIC;
827 1 : response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
828 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
829 : }
830 :
831 2 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
832 : }
833 :
834 : int
835 4 : nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
836 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
837 : {
838 : int rc;
839 :
840 4 : rc = spdk_bdev_nvme_iov_passthru_md(desc, ch, &req->cmd->nvme_cmd, req->iov, req->iovcnt,
841 4 : req->length, NULL, 0, nvmf_bdev_ctrlr_complete_cmd, req);
842 :
843 4 : if (spdk_unlikely(rc)) {
844 2 : if (rc == -ENOMEM) {
845 1 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
846 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
847 : }
848 1 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
849 1 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
850 1 : req->rsp->nvme_cpl.status.dnr = 1;
851 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
852 : }
853 :
854 2 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
855 : }
856 :
857 : int
858 4 : spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
859 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
860 : spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
861 : {
862 : int rc;
863 :
864 4 : if (spdk_unlikely(req->iovcnt > 1)) {
865 0 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
866 0 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
867 0 : req->rsp->nvme_cpl.status.dnr = 1;
868 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
869 : }
870 :
871 4 : req->cmd_cb_fn = cb_fn;
872 :
873 4 : rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
874 : nvmf_bdev_ctrlr_complete_admin_cmd, req);
875 4 : if (spdk_unlikely(rc)) {
876 2 : if (rc == -ENOMEM) {
877 1 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
878 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
879 : }
880 1 : req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
881 1 : if (rc == -ENOTSUP) {
882 1 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
883 : } else {
884 0 : req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
885 : }
886 :
887 1 : req->rsp->nvme_cpl.status.dnr = 1;
888 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
889 : }
890 :
891 2 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
892 : }
893 :
894 : static void
895 0 : nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
896 : {
897 0 : struct spdk_nvmf_request *req = cb_arg;
898 :
899 0 : if (success) {
900 0 : req->rsp->nvme_cpl.cdw0 &= ~1U;
901 : }
902 :
903 0 : spdk_nvmf_request_complete(req);
904 0 : spdk_bdev_free_io(bdev_io);
905 0 : }
906 :
907 : int
908 0 : spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
909 : struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
910 : struct spdk_nvmf_request *req_to_abort)
911 : {
912 : int rc;
913 :
914 0 : assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);
915 :
916 0 : rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
917 0 : if (spdk_likely(rc == 0)) {
918 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
919 0 : } else if (rc == -ENOMEM) {
920 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
921 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
922 : } else {
923 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
924 : }
925 : }
926 :
927 : bool
928 2 : nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
929 : struct spdk_dif_ctx *dif_ctx)
930 : {
931 2 : uint32_t init_ref_tag, dif_check_flags = 0;
932 : int rc;
933 2 : struct spdk_dif_ctx_init_ext_opts dif_opts;
934 :
935 2 : if (spdk_bdev_get_md_size(bdev) == 0) {
936 1 : return false;
937 : }
938 :
939 : /* Initial Reference Tag is the lower 32 bits of the start LBA. */
940 1 : init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);
941 :
942 1 : if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
943 0 : dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
944 : }
945 :
946 1 : if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
947 0 : dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
948 : }
949 :
950 1 : dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
951 1 : dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
952 2 : rc = spdk_dif_ctx_init(dif_ctx,
953 : spdk_bdev_get_block_size(bdev),
954 : spdk_bdev_get_md_size(bdev),
955 1 : spdk_bdev_is_md_interleaved(bdev),
956 1 : spdk_bdev_is_dif_head_of_md(bdev),
957 : spdk_bdev_get_dif_type(bdev),
958 : dif_check_flags,
959 : init_ref_tag, 0, 0, 0, 0, &dif_opts);
960 :
961 1 : return (rc == 0) ? true : false;
962 : }
963 :
964 : static void
965 0 : nvmf_bdev_ctrlr_zcopy_start_complete(struct spdk_bdev_io *bdev_io, bool success,
966 : void *cb_arg)
967 : {
968 0 : struct spdk_nvmf_request *req = cb_arg;
969 0 : struct iovec *iov;
970 0 : int iovcnt = 0;
971 :
972 0 : if (spdk_unlikely(!success)) {
973 0 : int sc = 0, sct = 0;
974 0 : uint32_t cdw0 = 0;
975 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
976 0 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
977 :
978 0 : response->cdw0 = cdw0;
979 0 : response->status.sc = sc;
980 0 : response->status.sct = sct;
981 :
982 0 : spdk_bdev_free_io(bdev_io);
983 0 : spdk_nvmf_request_complete(req);
984 0 : return;
985 : }
986 :
987 0 : spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);
988 :
989 0 : assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);
990 0 : assert(iovcnt > 0);
991 :
992 0 : req->iovcnt = iovcnt;
993 :
994 0 : assert(req->iov == iov);
995 :
996 0 : req->zcopy_bdev_io = bdev_io; /* Preserve the bdev_io for the end zcopy */
997 :
998 0 : spdk_nvmf_request_complete(req);
999 : /* Don't free the bdev_io here as it is needed for the END ZCOPY */
1000 : }
1001 :
1002 : int
1003 3 : nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
1004 : struct spdk_bdev_desc *desc,
1005 : struct spdk_io_channel *ch,
1006 : struct spdk_nvmf_request *req)
1007 : {
1008 3 : struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1009 3 : uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
1010 3 : uint32_t block_size = spdk_bdev_get_block_size(bdev);
1011 3 : uint64_t start_lba;
1012 3 : uint64_t num_blocks;
1013 : int rc;
1014 :
1015 3 : nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);
1016 :
1017 3 : if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
1018 1 : SPDK_ERRLOG("end of media\n");
1019 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1020 1 : rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
1021 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1022 : }
1023 :
1024 2 : if (spdk_unlikely(num_blocks * block_size > req->length)) {
1025 1 : SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
1026 : num_blocks, block_size, req->length);
1027 1 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1028 1 : rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1029 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1030 : }
1031 :
1032 1 : bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;
1033 :
1034 1 : rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
1035 : num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
1036 1 : if (spdk_unlikely(rc != 0)) {
1037 0 : if (rc == -ENOMEM) {
1038 0 : nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
1039 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
1040 : }
1041 0 : rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1042 0 : rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1043 0 : return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1044 : }
1045 :
1046 1 : return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
1047 : }
1048 :
1049 : static void
1050 0 : nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
1051 : void *cb_arg)
1052 : {
1053 0 : struct spdk_nvmf_request *req = cb_arg;
1054 :
1055 0 : if (spdk_unlikely(!success)) {
1056 0 : int sc = 0, sct = 0;
1057 0 : uint32_t cdw0 = 0;
1058 0 : struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1059 0 : spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
1060 :
1061 0 : response->cdw0 = cdw0;
1062 0 : response->status.sc = sc;
1063 0 : response->status.sct = sct;
1064 : }
1065 :
1066 0 : spdk_bdev_free_io(bdev_io);
1067 0 : req->zcopy_bdev_io = NULL;
1068 0 : spdk_nvmf_request_complete(req);
1069 0 : }
1070 :
1071 : void
1072 0 : nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
1073 : {
1074 : int rc __attribute__((unused));
1075 :
1076 0 : rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);
1077 :
1078 : /* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
1079 0 : assert(rc == 0);
1080 0 : }
|