/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef __NVMF_INTERNAL_H__
#define __NVMF_INTERNAL_H__

#include "spdk/stdinc.h"

#include "spdk/likely.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_transport.h"
#include "spdk/nvmf_spec.h"
#include "spdk/assert.h"
#include "spdk/bdev.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk/thread.h"
#include "spdk/tree.h"

/* The spec reserves cntlid values in the range FFF0h to FFFFh. */
#define NVMF_MIN_CNTLID 1
#define NVMF_MAX_CNTLID 0xFFEF
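
/*
 * Illustrative sketch (not part of this header): a proposed controller ID must
 * fall inside [NVMF_MIN_CNTLID, NVMF_MAX_CNTLID], e.g.
 *
 *	if (cntlid < NVMF_MIN_CNTLID || cntlid > NVMF_MAX_CNTLID) {
 *		return -EINVAL;
 *	}
 */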

enum spdk_nvmf_tgt_state {
	NVMF_TGT_IDLE = 0,
	NVMF_TGT_RUNNING,
	NVMF_TGT_PAUSING,
	NVMF_TGT_PAUSED,
	NVMF_TGT_RESUMING,
};

enum spdk_nvmf_subsystem_state {
	SPDK_NVMF_SUBSYSTEM_INACTIVE = 0,
	SPDK_NVMF_SUBSYSTEM_ACTIVATING,
	SPDK_NVMF_SUBSYSTEM_ACTIVE,
	SPDK_NVMF_SUBSYSTEM_PAUSING,
	SPDK_NVMF_SUBSYSTEM_PAUSED,
	SPDK_NVMF_SUBSYSTEM_RESUMING,
	SPDK_NVMF_SUBSYSTEM_DEACTIVATING,
	SPDK_NVMF_SUBSYSTEM_NUM_STATES,
};

RB_HEAD(subsystem_tree, spdk_nvmf_subsystem);

struct spdk_nvmf_tgt {
	char name[NVMF_TGT_NAME_MAX_LENGTH];

	pthread_mutex_t mutex;

	uint64_t discovery_genctr;

	uint32_t max_subsystems;

	enum spdk_nvmf_tgt_discovery_filter discovery_filter;

	enum spdk_nvmf_tgt_state state;

	struct spdk_bit_array *subsystem_ids;

	struct subsystem_tree subsystems;

	TAILQ_HEAD(, spdk_nvmf_transport) transports;
	TAILQ_HEAD(, spdk_nvmf_poll_group) poll_groups;
	TAILQ_HEAD(, spdk_nvmf_referral) referrals;

	/* Used for round-robin assignment of connections to poll groups;
	 * see the illustrative sketch after this struct. */
	struct spdk_nvmf_poll_group *next_poll_group;

	spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
	void *destroy_cb_arg;

	uint16_t crdt[3];
	uint16_t num_poll_groups;

	TAILQ_ENTRY(spdk_nvmf_tgt) link;
};
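
/*
 * Illustrative sketch (not part of this header) of how the next_poll_group
 * field above can drive round-robin placement of new connections. The helper
 * name is made up and the poll group's TAILQ entry is assumed to be "link":
 *
 *	static struct spdk_nvmf_poll_group *
 *	get_next_poll_group_rr(struct spdk_nvmf_tgt *tgt)
 *	{
 *		struct spdk_nvmf_poll_group *pg = tgt->next_poll_group;
 *
 *		if (pg == NULL) {
 *			pg = TAILQ_FIRST(&tgt->poll_groups);
 *		}
 *		if (pg != NULL) {
 *			tgt->next_poll_group = TAILQ_NEXT(pg, link);
 *		}
 *		return pg;
 *	}
 */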

struct spdk_nvmf_host {
	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	TAILQ_ENTRY(spdk_nvmf_host) link;
};

struct spdk_nvmf_subsystem_listener {
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
	struct spdk_nvme_transport_id *trid;
	struct spdk_nvmf_transport *transport;
	enum spdk_nvme_ana_state *ana_state;
	uint64_t ana_state_change_count;
	uint16_t id;
	struct spdk_nvmf_listener_opts opts;
	TAILQ_ENTRY(spdk_nvmf_subsystem_listener) link;
};

struct spdk_nvmf_referral {
	/* Discovery Log Page Entry for this referral */
	struct spdk_nvmf_discovery_log_page_entry entry;
	/* Transport ID */
	struct spdk_nvme_transport_id trid;
	TAILQ_ENTRY(spdk_nvmf_referral) link;
};

struct spdk_nvmf_subsystem_pg_ns_info {
	struct spdk_io_channel *channel;
	struct spdk_uuid uuid;
	/* current reservation key, no reservation if the value is 0 */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* Host ID which holds the reservation */
	struct spdk_uuid holder_id;
	/* Host ID for the registrants with the namespace */
	struct spdk_uuid reg_hostid[SPDK_NVMF_MAX_NUM_REGISTRANTS];
	uint64_t num_blocks;

	/* I/O outstanding to this namespace */
	uint64_t io_outstanding;
	enum spdk_nvmf_subsystem_state state;
};

typedef void(*spdk_nvmf_poll_group_mod_done)(void *cb_arg, int status);

struct spdk_nvmf_subsystem_poll_group {
	/* Array of namespace information for each namespace, indexed by nsid - 1;
	 * see the lookup sketch after this struct. */
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;
	uint32_t num_ns;
	enum spdk_nvmf_subsystem_state state;

	/* Number of ADMIN and FABRICS requests outstanding */
	uint64_t mgmt_io_outstanding;
	spdk_nvmf_poll_group_mod_done cb_fn;
	void *cb_arg;

	TAILQ_HEAD(, spdk_nvmf_request) queued;
};
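
/*
 * Illustrative lookup into the ns_info array above (sketch only; the helper
 * name is made up). It mirrors _nvmf_subsystem_get_ns() further below:
 *
 *	static struct spdk_nvmf_subsystem_pg_ns_info *
 *	pg_get_ns_info(struct spdk_nvmf_subsystem_poll_group *sgroup, uint32_t nsid)
 *	{
 *		if (nsid == 0 || nsid > sgroup->num_ns) {
 *			return NULL;
 *		}
 *		return &sgroup->ns_info[nsid - 1];
 *	}
 */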

struct spdk_nvmf_registrant {
	TAILQ_ENTRY(spdk_nvmf_registrant) link;
	struct spdk_uuid hostid;
	/* Registration key */
	uint64_t rkey;
};

struct spdk_nvmf_ns {
	uint32_t nsid;
	uint32_t anagrpid;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_nvmf_ns_opts opts;
	/* reservation notification mask */
	uint32_t mask;
	/* generation code */
	uint32_t gen;
	/* registrants head */
	TAILQ_HEAD(, spdk_nvmf_registrant) registrants;
	/* current reservation key */
	uint64_t crkey;
	/* reservation type */
	enum spdk_nvme_reservation_type rtype;
	/* current reservation holder, only valid if reservation type can only have one holder */
	struct spdk_nvmf_registrant *holder;
	/* Persist Through Power Loss file which contains the persistent reservation */
	char *ptpl_file;
	/* Persist Through Power Loss feature is enabled */
	bool ptpl_activated;
	/* ZCOPY supported on bdev device */
	bool zcopy;
	/* Command Set Identifier */
	enum spdk_nvme_csi csi;
};
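
/*
 * Illustrative sketch (not part of this header) of how the registrants list
 * above is typically searched for a given host; the helper name is made up:
 *
 *	static struct spdk_nvmf_registrant *
 *	ns_find_registrant(struct spdk_nvmf_ns *ns, const struct spdk_uuid *hostid)
 *	{
 *		struct spdk_nvmf_registrant *reg;
 *
 *		TAILQ_FOREACH(reg, &ns->registrants, link) {
 *			if (spdk_uuid_compare(&reg->hostid, hostid) == 0) {
 *				return reg;
 *			}
 *		}
 *		return NULL;
 *	}
 */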

/*
 * NVMf reservation notification log page.
 */
struct spdk_nvmf_reservation_log {
	struct spdk_nvme_reservation_notification_log log;
	TAILQ_ENTRY(spdk_nvmf_reservation_log) link;
	struct spdk_nvmf_ctrlr *ctrlr;
};

/*
 * NVMf async event completion.
 */
struct spdk_nvmf_async_event_completion {
	union spdk_nvme_async_event_completion event;
	STAILQ_ENTRY(spdk_nvmf_async_event_completion) link;
};

/*
 * This structure represents an NVMe-oF controller,
 * which is like a "session" in networking terms.
 */
struct spdk_nvmf_ctrlr {
	uint16_t cntlid;
	char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];
	struct spdk_nvmf_subsystem *subsys;

	struct spdk_nvmf_ctrlr_data cdata;

	struct spdk_nvmf_registers vcprop;

	struct spdk_nvmf_ctrlr_feat feat;

	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_thread *thread;
	struct spdk_bit_array *qpair_mask;

	const struct spdk_nvmf_subsystem_listener *listener;

	struct spdk_nvmf_request *aer_req[SPDK_NVMF_MAX_ASYNC_EVENTS];
	STAILQ_HEAD(, spdk_nvmf_async_event_completion) async_events;
	uint64_t notice_aen_mask;
	uint8_t nr_aer_reqs;
	struct spdk_uuid hostid;

	uint32_t association_timeout; /* in milliseconds */
	uint16_t changed_ns_list_count;
	struct spdk_nvme_ns_list changed_ns_list;
	uint64_t log_page_count;
	uint8_t num_avail_log_pages;
	TAILQ_HEAD(log_page_head, spdk_nvmf_reservation_log) log_head;

	/* Time to trigger keep-alive: poller_time = now_tick + period;
	 * see the sketch after this struct. */
	uint64_t last_keep_alive_tick;
	struct spdk_poller *keep_alive_poller;

	struct spdk_poller *association_timer;

	struct spdk_poller *cc_timer;
	uint64_t cc_timeout_tsc;
	struct spdk_poller *cc_timeout_timer;

	bool dif_insert_or_strip;
	bool in_destruct;
	bool disconnect_in_progress;
	/* valid only when disconnect_in_progress is true */
	bool disconnect_is_shn;
	bool acre_enabled;
	bool dynamic_ctrlr;

	TAILQ_ENTRY(spdk_nvmf_ctrlr) link;
};
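
/*
 * Illustrative keep-alive arithmetic for the fields above (sketch only;
 * kato_ms stands for the controller's Keep Alive Timeout in milliseconds
 * and is not a real field name):
 *
 *	uint64_t now = spdk_get_ticks();
 *	uint64_t period = kato_ms * spdk_get_ticks_hz() / 1000;
 *
 *	if (now > ctrlr->last_keep_alive_tick + period) {
 *		// keep-alive window expired; start disconnecting the controller
 *	}
 */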

#define NVMF_MAX_LISTENERS_PER_SUBSYSTEM 16

struct nvmf_subsystem_state_change_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	uint16_t nsid;

	enum spdk_nvmf_subsystem_state original_state;
	enum spdk_nvmf_subsystem_state requested_state;
	int status;
	struct spdk_thread *thread;

	spdk_nvmf_subsystem_state_change_done cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(nvmf_subsystem_state_change_ctx) link;
};

struct spdk_nvmf_subsystem {
	struct spdk_thread *thread;

	uint32_t id;

	enum spdk_nvmf_subsystem_state state;
	enum spdk_nvmf_subtype subtype;

	uint16_t next_cntlid;
	struct {
		uint8_t allow_any_host : 1;
		uint8_t allow_any_listener : 1;
		uint8_t ana_reporting : 1;
		uint8_t reserved : 5;
	} flags;

	bool destroying;
	bool async_destroy;

	/* Zoned storage related fields */
	bool zone_append_supported;
	uint64_t max_zone_append_size_kib;

	struct spdk_nvmf_tgt *tgt;
	RB_ENTRY(spdk_nvmf_subsystem) link;

	/* Array of pointers to namespaces of size max_nsid indexed by nsid - 1 */
	struct spdk_nvmf_ns **ns;
	uint32_t max_nsid;

	uint16_t min_cntlid;
	uint16_t max_cntlid;

	uint64_t max_discard_size_kib;
	uint64_t max_write_zeroes_size_kib;

	TAILQ_HEAD(, spdk_nvmf_ctrlr) ctrlrs;

	/* A mutex used to protect the hosts list and the allow_any_host flag. Unlike the
	 * namespace array, this list is not used on the I/O path (it's needed for handling
	 * things like the CONNECT command), so a mutex protects it instead of requiring the
	 * subsystem state to be paused. This removes the requirement to pause the subsystem
	 * when hosts are added or removed dynamically. See the sketch after this struct. */
	pthread_mutex_t mutex;
	TAILQ_HEAD(, spdk_nvmf_host) hosts;
	TAILQ_HEAD(, spdk_nvmf_subsystem_listener) listeners;
	struct spdk_bit_array *used_listener_ids;

	TAILQ_ENTRY(spdk_nvmf_subsystem) entries;

	nvmf_subsystem_destroy_cb async_destroy_cb;
	void *async_destroy_cb_arg;

	char sn[SPDK_NVME_CTRLR_SN_LEN + 1];
	char mn[SPDK_NVME_CTRLR_MN_LEN + 1];
	char subnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/* Array of per-ANA-group namespace counts, of size max_nsid, indexed by anagrpid - 1.
	 * Sizing it like the namespace array is sufficient because there can never be more
	 * ANA groups than namespaces.
	 */
	uint32_t *ana_group;
	/* Queue of state change requests */
	TAILQ_HEAD(, nvmf_subsystem_state_change_ctx) state_changes;
};
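
/*
 * Illustrative sketch (not part of this header) of the hosts-list locking
 * convention described above: readers such as CONNECT handling take the
 * subsystem mutex rather than pausing the subsystem. The helper name is
 * made up:
 *
 *	static bool
 *	subsystem_host_allowed(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn)
 *	{
 *		struct spdk_nvmf_host *host;
 *		bool allowed = false;
 *
 *		pthread_mutex_lock(&subsystem->mutex);
 *		if (subsystem->flags.allow_any_host) {
 *			allowed = true;
 *		} else {
 *			TAILQ_FOREACH(host, &subsystem->hosts, link) {
 *				if (strcmp(host->nqn, hostnqn) == 0) {
 *					allowed = true;
 *					break;
 *				}
 *			}
 *		}
 *		pthread_mutex_unlock(&subsystem->mutex);
 *		return allowed;
 *	}
 */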

static int
subsystem_cmp(struct spdk_nvmf_subsystem *subsystem1, struct spdk_nvmf_subsystem *subsystem2)
{
	return strncmp(subsystem1->subnqn, subsystem2->subnqn, sizeof(subsystem1->subnqn));
}

RB_GENERATE_STATIC(subsystem_tree, spdk_nvmf_subsystem, link, subsystem_cmp);
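
/*
 * The tree generated above is keyed by subnqn, so a subsystem lookup by NQN
 * can use a stack-allocated key. Illustrative sketch (the function name is
 * made up):
 *
 *	static struct spdk_nvmf_subsystem *
 *	tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
 *	{
 *		struct spdk_nvmf_subsystem key;
 *
 *		snprintf(key.subnqn, sizeof(key.subnqn), "%s", subnqn);
 *		return RB_FIND(subsystem_tree, &tgt->subsystems, &key);
 *	}
 */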

int nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
	struct spdk_nvmf_subsystem *subsystem);
int nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
	struct spdk_nvmf_subsystem *subsystem,
	spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
	struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
	struct spdk_nvmf_subsystem *subsystem,
	uint32_t nsid,
	spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);
void nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
	struct spdk_nvmf_subsystem *subsystem, spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg);

void nvmf_update_discovery_log(struct spdk_nvmf_tgt *tgt, const char *hostnqn);
void nvmf_get_discovery_log_page(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	uint32_t iovcnt, uint64_t offset, uint32_t length,
	struct spdk_nvme_transport_id *cmd_source_trid);

void nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req);
int nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req);
bool nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr);
bool nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid);
bool nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req);

void nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
	bool dif_insert_or_strip);
int nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req);
int nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
int nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch, struct spdk_nvmf_request *req);
bool nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
	struct spdk_dif_ctx *dif_ctx);
bool nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev);

int nvmf_subsystem_add_ctrlr(struct spdk_nvmf_subsystem *subsystem,
	struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_ctrlr(struct spdk_nvmf_subsystem *subsystem,
	struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_subsystem_remove_all_listeners(struct spdk_nvmf_subsystem *subsystem,
	bool stop);
struct spdk_nvmf_ctrlr *nvmf_subsystem_get_ctrlr(struct spdk_nvmf_subsystem *subsystem,
	uint16_t cntlid);
struct spdk_nvmf_subsystem_listener *nvmf_subsystem_find_listener(
	struct spdk_nvmf_subsystem *subsystem,
	const struct spdk_nvme_transport_id *trid);
struct spdk_nvmf_listener *nvmf_transport_find_listener(
	struct spdk_nvmf_transport *transport,
	const struct spdk_nvme_transport_id *trid);
void nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
	bool named);
void nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
	struct spdk_json_write_ctx *w);

/**
 * Sets the controller ID range for a subsystem.
 * Valid range is [1, 0xFFEF].
 *
 * May only be performed on subsystems in the INACTIVE state.
 *
 * \param subsystem Subsystem to modify.
 * \param min_cntlid Minimum controller ID.
 * \param max_cntlid Maximum controller ID.
 *
 * \return 0 on success, or negated errno value on failure.
 */
int nvmf_subsystem_set_cntlid_range(struct spdk_nvmf_subsystem *subsystem,
	uint16_t min_cntlid, uint16_t max_cntlid);
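
/*
 * Usage sketch (illustrative): the range can only be changed while the
 * subsystem is inactive and must stay within [NVMF_MIN_CNTLID, NVMF_MAX_CNTLID]:
 *
 *	if (subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) {
 *		rc = nvmf_subsystem_set_cntlid_range(subsystem, 10, 100);
 *	}
 */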

int nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr);
int nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr);
void nvmf_ctrlr_async_event_discovery_log_change_notice(void *ctx);
void nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr);

void nvmf_ns_reservation_request(void *ctx);
void nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
	struct spdk_nvmf_ns *ns,
	enum spdk_nvme_reservation_notification_log_page_type type);

bool nvmf_ns_is_ptpl_capable(const struct spdk_nvmf_ns *ns);

/*
 * Abort zero-copy requests that already got the buffer (received the zcopy_start callback) but
 * haven't started zcopy_end. These requests are kept on the outstanding queue, but are not waiting
 * for a completion from the bdev layer, so when a qpair is being disconnected we need to kick them
 * to force their completion.
 */
void nvmf_qpair_abort_pending_zcopy_reqs(struct spdk_nvmf_qpair *qpair);

/*
 * Frees the RDMA resources for the qpair's AER without informing the host.
 * This should be called when deleting a qpair, to make sure the qpair is completely
 * empty before freeing the request. The AER is freed without sending a completion
 * in order to prevent the host from submitting another AER.
 */
void nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair);

int nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req);

void nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr);

static inline struct spdk_nvmf_ns *
_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	/* NOTE: This implicitly also checks for 0, since 0 - 1 wraps around to UINT32_MAX. */
	if (spdk_unlikely(nsid - 1 >= subsystem->max_nsid)) {
		return NULL;
	}

	return subsystem->ns[nsid - 1];
}
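
/*
 * Worked example of the wrap-around check above: for nsid == 0, (nsid - 1) becomes
 * UINT32_MAX, which is always >= max_nsid, so NSID 0 is rejected without a separate
 * comparison. A typical caller looks like this (illustrative only):
 *
 *	ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid);
 *	if (ns == NULL || ns->bdev == NULL) {
 *		// invalid or inactive namespace; fail the command
 *	}
 */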

static inline bool
nvmf_qpair_is_admin_queue(struct spdk_nvmf_qpair *qpair)
{
	return qpair->qid == 0;
}

static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}

/*
 * Tests whether a given string represents a valid NQN.
 */
bool nvmf_nqn_is_valid(const char *nqn);

/*
 * Tests whether a given NQN describes a discovery subsystem.
 */
bool nvmf_nqn_is_discovery(const char *nqn);

/**
 * Initiates a zcopy start operation
 *
 * \param bdev The \ref spdk_bdev
 * \param desc The \ref spdk_bdev_desc
 * \param ch The \ref spdk_io_channel
 * \param req The \ref spdk_nvmf_request passed to the bdev for processing
 *
 * \return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE if the command was completed immediately or
 *         SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS if the command was submitted and will be
 *         completed asynchronously. Asynchronous completions are notified through
 *         spdk_nvmf_request_complete().
 */
int nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
	struct spdk_bdev_desc *desc,
	struct spdk_io_channel *ch,
	struct spdk_nvmf_request *req);

/**
 * Ends a zcopy operation
 *
 * \param req The NVMe-oF request
 * \param commit Flag indicating whether the buffers should be committed
 */
void nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit);
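
/*
 * Illustrative pairing of the two zcopy calls above (error handling elided): a request
 * that successfully started a zero-copy operation must later end it, committing the
 * buffers only when the data should be written out:
 *
 *	rc = nvmf_bdev_ctrlr_zcopy_start(bdev, desc, ch, req);
 *	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
 *		// buffers are attached to req; execute the I/O, then:
 *		nvmf_bdev_ctrlr_zcopy_end(req, commit);	// commit == true for writes
 *	}
 */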

#endif /* __NVMF_INTERNAL_H__ */