Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : */
6 :
7 : #include "spdk/stdinc.h"
8 :
9 : #include "spdk/bdev.h"
10 :
11 : #include "spdk/accel.h"
12 : #include "spdk/config.h"
13 : #include "spdk/env.h"
14 : #include "spdk/thread.h"
15 : #include "spdk/likely.h"
16 : #include "spdk/queue.h"
17 : #include "spdk/nvme_spec.h"
18 : #include "spdk/scsi_spec.h"
19 : #include "spdk/notify.h"
20 : #include "spdk/util.h"
21 : #include "spdk/trace.h"
22 : #include "spdk/dma.h"
23 :
24 : #include "spdk/bdev_module.h"
25 : #include "spdk/log.h"
26 : #include "spdk/string.h"
27 :
28 : #include "bdev_internal.h"
29 : #include "spdk_internal/trace_defs.h"
30 : #include "spdk_internal/assert.h"
31 :
32 : #ifdef SPDK_CONFIG_VTUNE
33 : #include "ittnotify.h"
34 : #include "ittnotify_types.h"
35 : int __itt_init_ittlib(const char *, __itt_group_id);
36 : #endif
37 :
38 : #define SPDK_BDEV_IO_POOL_SIZE (64 * 1024 - 1)
39 : #define SPDK_BDEV_IO_CACHE_SIZE 256
40 : #define SPDK_BDEV_AUTO_EXAMINE true
41 : #define BUF_SMALL_CACHE_SIZE 128
42 : #define BUF_LARGE_CACHE_SIZE 16
43 : #define NOMEM_THRESHOLD_COUNT 8
44 :
45 : #define SPDK_BDEV_QOS_TIMESLICE_IN_USEC 1000
46 : #define SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE 1
47 : #define SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE 512
48 : #define SPDK_BDEV_QOS_MIN_IOS_PER_SEC 1000
49 : #define SPDK_BDEV_QOS_MIN_BYTES_PER_SEC (1024 * 1024)
50 : #define SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC (UINT64_MAX / (1024 * 1024))
51 : #define SPDK_BDEV_QOS_LIMIT_NOT_DEFINED UINT64_MAX
52 : #define SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC 1000
53 :
54 : /* The maximum number of children requests for a UNMAP or WRITE ZEROES command
55 : * when splitting into children requests at a time.
56 : */
57 : #define SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS (8)
58 : #define BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD 1000000
59 :
60 : /* The maximum number of children requests for a COPY command
61 : * when splitting into children requests at a time.
62 : */
63 : #define SPDK_BDEV_MAX_CHILDREN_COPY_REQS (8)
64 :
65 : #define LOG_ALREADY_CLAIMED_ERROR(detail, bdev) \
66 : log_already_claimed(SPDK_LOG_ERROR, __LINE__, __func__, detail, bdev)
67 : #ifdef DEBUG
68 : #define LOG_ALREADY_CLAIMED_DEBUG(detail, bdev) \
69 : log_already_claimed(SPDK_LOG_DEBUG, __LINE__, __func__, detail, bdev)
70 : #else
71 : #define LOG_ALREADY_CLAIMED_DEBUG(detail, bdev) do {} while(0)
72 : #endif
73 :
74 : static void log_already_claimed(enum spdk_log_level level, const int line, const char *func,
75 : const char *detail, struct spdk_bdev *bdev);
76 :
77 : static const char *qos_rpc_type[] = {"rw_ios_per_sec",
78 : "rw_mbytes_per_sec", "r_mbytes_per_sec", "w_mbytes_per_sec"
79 : };
80 :
81 : TAILQ_HEAD(spdk_bdev_list, spdk_bdev);
82 :
83 : RB_HEAD(bdev_name_tree, spdk_bdev_name);
84 :
85 : static int
86 568 : bdev_name_cmp(struct spdk_bdev_name *name1, struct spdk_bdev_name *name2)
87 : {
88 568 : return strcmp(name1->name, name2->name);
89 : }
90 :
91 1699 : RB_GENERATE_STATIC(bdev_name_tree, spdk_bdev_name, node, bdev_name_cmp);
92 :
93 : struct spdk_bdev_mgr {
94 : struct spdk_mempool *bdev_io_pool;
95 :
96 : void *zero_buffer;
97 :
98 : TAILQ_HEAD(bdev_module_list, spdk_bdev_module) bdev_modules;
99 :
100 : struct spdk_bdev_list bdevs;
101 : struct bdev_name_tree bdev_names;
102 :
103 : bool init_complete;
104 : bool module_init_complete;
105 :
106 : struct spdk_spinlock spinlock;
107 :
108 : TAILQ_HEAD(, spdk_bdev_open_async_ctx) async_bdev_opens;
109 :
110 : #ifdef SPDK_CONFIG_VTUNE
111 : __itt_domain *domain;
112 : #endif
113 : };
114 :
115 : static struct spdk_bdev_mgr g_bdev_mgr = {
116 : .bdev_modules = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdev_modules),
117 : .bdevs = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.bdevs),
118 : .bdev_names = RB_INITIALIZER(g_bdev_mgr.bdev_names),
119 : .init_complete = false,
120 : .module_init_complete = false,
121 : .async_bdev_opens = TAILQ_HEAD_INITIALIZER(g_bdev_mgr.async_bdev_opens),
122 : };
123 :
124 : static void
125 : __attribute__((constructor))
126 3 : _bdev_init(void)
127 : {
128 3 : spdk_spin_init(&g_bdev_mgr.spinlock);
129 3 : }
130 :
131 : typedef void (*lock_range_cb)(struct lba_range *range, void *ctx, int status);
132 :
133 : typedef void (*bdev_copy_bounce_buffer_cpl)(void *ctx, int rc);
134 :
135 : struct lba_range {
136 : struct spdk_bdev *bdev;
137 : uint64_t offset;
138 : uint64_t length;
139 : bool quiesce;
140 : void *locked_ctx;
141 : struct spdk_thread *owner_thread;
142 : struct spdk_bdev_channel *owner_ch;
143 : TAILQ_ENTRY(lba_range) tailq;
144 : TAILQ_ENTRY(lba_range) tailq_module;
145 : };
146 :
147 : static struct spdk_bdev_opts g_bdev_opts = {
148 : .bdev_io_pool_size = SPDK_BDEV_IO_POOL_SIZE,
149 : .bdev_io_cache_size = SPDK_BDEV_IO_CACHE_SIZE,
150 : .bdev_auto_examine = SPDK_BDEV_AUTO_EXAMINE,
151 : .iobuf_small_cache_size = BUF_SMALL_CACHE_SIZE,
152 : .iobuf_large_cache_size = BUF_LARGE_CACHE_SIZE,
153 : };
154 :
155 : static spdk_bdev_init_cb g_init_cb_fn = NULL;
156 : static void *g_init_cb_arg = NULL;
157 :
158 : static spdk_bdev_fini_cb g_fini_cb_fn = NULL;
159 : static void *g_fini_cb_arg = NULL;
160 : static struct spdk_thread *g_fini_thread = NULL;
161 :
162 : struct spdk_bdev_qos_limit {
163 : /** IOs or bytes allowed per second (i.e., 1s). */
164 : uint64_t limit;
165 :
166 : /** Remaining IOs or bytes allowed in current timeslice (e.g., 1ms).
167 : * For remaining bytes, allowed to run negative if an I/O is submitted when
168 : * some bytes are remaining, but the I/O is bigger than that amount. The
169 : * excess will be deducted from the next timeslice.
170 : */
171 : int64_t remaining_this_timeslice;
172 :
173 : /** Minimum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
174 : uint32_t min_per_timeslice;
175 :
176 : /** Maximum allowed IOs or bytes to be issued in one timeslice (e.g., 1ms). */
177 : uint32_t max_per_timeslice;
178 :
179 : /** Function to check whether to queue the IO.
180 : * If The IO is allowed to pass, the quota will be reduced correspondingly.
181 : */
182 : bool (*queue_io)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
183 :
184 : /** Function to rewind the quota once the IO was allowed to be sent by this
185 : * limit but queued due to one of the further limits.
186 : */
187 : void (*rewind_quota)(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io);
188 : };
189 :
190 : struct spdk_bdev_qos {
191 : /** Types of structure of rate limits. */
192 : struct spdk_bdev_qos_limit rate_limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
193 :
194 : /** The channel that all I/O are funneled through. */
195 : struct spdk_bdev_channel *ch;
196 :
197 : /** The thread on which the poller is running. */
198 : struct spdk_thread *thread;
199 :
200 : /** Size of a timeslice in tsc ticks. */
201 : uint64_t timeslice_size;
202 :
203 : /** Timestamp of start of last timeslice. */
204 : uint64_t last_timeslice;
205 :
206 : /** Poller that processes queued I/O commands each time slice. */
207 : struct spdk_poller *poller;
208 : };
209 :
210 : struct spdk_bdev_mgmt_channel {
211 : /*
212 : * Each thread keeps a cache of bdev_io - this allows
213 : * bdev threads which are *not* DPDK threads to still
214 : * benefit from a per-thread bdev_io cache. Without
215 : * this, non-DPDK threads fetching from the mempool
216 : * incur a cmpxchg on get and put.
217 : */
218 : bdev_io_stailq_t per_thread_cache;
219 : uint32_t per_thread_cache_count;
220 : uint32_t bdev_io_cache_size;
221 :
222 : struct spdk_iobuf_channel iobuf;
223 :
224 : TAILQ_HEAD(, spdk_bdev_shared_resource) shared_resources;
225 : TAILQ_HEAD(, spdk_bdev_io_wait_entry) io_wait_queue;
226 : };
227 :
228 : /*
229 : * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
230 : * will queue here their IO that awaits retry. It makes it possible to retry sending
231 : * IO to one bdev after IO from other bdev completes.
232 : */
233 : struct spdk_bdev_shared_resource {
234 : /* The bdev management channel */
235 : struct spdk_bdev_mgmt_channel *mgmt_ch;
236 :
237 : /*
238 : * Count of I/O submitted to bdev module and waiting for completion.
239 : * Incremented before submit_request() is called on an spdk_bdev_io.
240 : */
241 : uint64_t io_outstanding;
242 :
243 : /*
244 : * Queue of IO awaiting retry because of a previous NOMEM status returned
245 : * on this channel.
246 : */
247 : bdev_io_tailq_t nomem_io;
248 :
249 : /*
250 : * Threshold which io_outstanding must drop to before retrying nomem_io.
251 : */
252 : uint64_t nomem_threshold;
253 :
254 : /* I/O channel allocated by a bdev module */
255 : struct spdk_io_channel *shared_ch;
256 :
257 : struct spdk_poller *nomem_poller;
258 :
259 : /* Refcount of bdev channels using this resource */
260 : uint32_t ref;
261 :
262 : TAILQ_ENTRY(spdk_bdev_shared_resource) link;
263 : };
264 :
265 : #define BDEV_CH_RESET_IN_PROGRESS (1 << 0)
266 : #define BDEV_CH_QOS_ENABLED (1 << 1)
267 :
268 : struct spdk_bdev_channel {
269 : struct spdk_bdev *bdev;
270 :
271 : /* The channel for the underlying device */
272 : struct spdk_io_channel *channel;
273 :
274 : /* Accel channel */
275 : struct spdk_io_channel *accel_channel;
276 :
277 : /* Per io_device per thread data */
278 : struct spdk_bdev_shared_resource *shared_resource;
279 :
280 : struct spdk_bdev_io_stat *stat;
281 :
282 : /*
283 : * Count of I/O submitted to the underlying dev module through this channel
284 : * and waiting for completion.
285 : */
286 : uint64_t io_outstanding;
287 :
288 : /*
289 : * List of all submitted I/Os including I/O that are generated via splitting.
290 : */
291 : bdev_io_tailq_t io_submitted;
292 :
293 : /*
294 : * List of spdk_bdev_io that are currently queued because they write to a locked
295 : * LBA range.
296 : */
297 : bdev_io_tailq_t io_locked;
298 :
299 : /* List of I/Os with accel sequence being currently executed */
300 : bdev_io_tailq_t io_accel_exec;
301 :
302 : /* List of I/Os doing memory domain pull/push */
303 : bdev_io_tailq_t io_memory_domain;
304 :
305 : uint32_t flags;
306 :
307 : /* Counts number of bdev_io in the io_submitted TAILQ */
308 : uint16_t queue_depth;
309 :
310 : uint16_t trace_id;
311 :
312 : struct spdk_histogram_data *histogram;
313 :
314 : #ifdef SPDK_CONFIG_VTUNE
315 : uint64_t start_tsc;
316 : uint64_t interval_tsc;
317 : __itt_string_handle *handle;
318 : struct spdk_bdev_io_stat *prev_stat;
319 : #endif
320 :
321 : lba_range_tailq_t locked_ranges;
322 :
323 : /** List of I/Os queued by QoS. */
324 : bdev_io_tailq_t qos_queued_io;
325 : };
326 :
327 : struct media_event_entry {
328 : struct spdk_bdev_media_event event;
329 : TAILQ_ENTRY(media_event_entry) tailq;
330 : };
331 :
332 : #define MEDIA_EVENT_POOL_SIZE 64
333 :
334 : struct spdk_bdev_desc {
335 : struct spdk_bdev *bdev;
336 : struct spdk_thread *thread;
337 : struct {
338 : spdk_bdev_event_cb_t event_fn;
339 : void *ctx;
340 : } callback;
341 : bool closed;
342 : bool write;
343 : bool memory_domains_supported;
344 : bool accel_sequence_supported[SPDK_BDEV_NUM_IO_TYPES];
345 : struct spdk_spinlock spinlock;
346 : uint32_t refs;
347 : TAILQ_HEAD(, media_event_entry) pending_media_events;
348 : TAILQ_HEAD(, media_event_entry) free_media_events;
349 : struct media_event_entry *media_events_buffer;
350 : TAILQ_ENTRY(spdk_bdev_desc) link;
351 :
352 : uint64_t timeout_in_sec;
353 : spdk_bdev_io_timeout_cb cb_fn;
354 : void *cb_arg;
355 : struct spdk_poller *io_timeout_poller;
356 : struct spdk_bdev_module_claim *claim;
357 : };
358 :
359 : struct spdk_bdev_iostat_ctx {
360 : struct spdk_bdev_io_stat *stat;
361 : enum spdk_bdev_reset_stat_mode reset_mode;
362 : spdk_bdev_get_device_stat_cb cb;
363 : void *cb_arg;
364 : };
365 :
366 : struct set_qos_limit_ctx {
367 : void (*cb_fn)(void *cb_arg, int status);
368 : void *cb_arg;
369 : struct spdk_bdev *bdev;
370 : };
371 :
372 : struct spdk_bdev_channel_iter {
373 : spdk_bdev_for_each_channel_msg fn;
374 : spdk_bdev_for_each_channel_done cpl;
375 : struct spdk_io_channel_iter *i;
376 : void *ctx;
377 : };
378 :
379 : struct spdk_bdev_io_error_stat {
380 : uint32_t error_status[-SPDK_MIN_BDEV_IO_STATUS];
381 : };
382 :
383 : enum bdev_io_retry_state {
384 : BDEV_IO_RETRY_STATE_INVALID,
385 : BDEV_IO_RETRY_STATE_PULL,
386 : BDEV_IO_RETRY_STATE_PULL_MD,
387 : BDEV_IO_RETRY_STATE_SUBMIT,
388 : BDEV_IO_RETRY_STATE_PUSH,
389 : BDEV_IO_RETRY_STATE_PUSH_MD,
390 : };
391 :
392 : #define __bdev_to_io_dev(bdev) (((char *)bdev) + 1)
393 : #define __bdev_from_io_dev(io_dev) ((struct spdk_bdev *)(((char *)io_dev) - 1))
394 : #define __io_ch_to_bdev_ch(io_ch) ((struct spdk_bdev_channel *)spdk_io_channel_get_ctx(io_ch))
395 : #define __io_ch_to_bdev_mgmt_ch(io_ch) ((struct spdk_bdev_mgmt_channel *)spdk_io_channel_get_ctx(io_ch))
396 :
397 : static inline void bdev_io_complete(void *ctx);
398 : static inline void bdev_io_complete_unsubmitted(struct spdk_bdev_io *bdev_io);
399 : static void bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io);
400 : static void bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io);
401 :
402 : static void bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
403 : static int bdev_write_zero_buffer(struct spdk_bdev_io *bdev_io);
404 :
405 : static void bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
406 : struct spdk_io_channel *ch, void *_ctx);
407 : static void bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status);
408 :
409 : static int bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
410 : struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
411 : uint64_t num_blocks,
412 : struct spdk_memory_domain *domain, void *domain_ctx,
413 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
414 : spdk_bdev_io_completion_cb cb, void *cb_arg);
415 : static int bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
416 : struct iovec *iov, int iovcnt, void *md_buf,
417 : uint64_t offset_blocks, uint64_t num_blocks,
418 : struct spdk_memory_domain *domain, void *domain_ctx,
419 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
420 : uint32_t nvme_cdw12_raw, uint32_t nvme_cdw13_raw,
421 : spdk_bdev_io_completion_cb cb, void *cb_arg);
422 :
423 : static int bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
424 : uint64_t offset, uint64_t length,
425 : lock_range_cb cb_fn, void *cb_arg);
426 :
427 : static int bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
428 : uint64_t offset, uint64_t length,
429 : lock_range_cb cb_fn, void *cb_arg);
430 :
431 : static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
432 : static bool bdev_abort_buf_io(struct spdk_bdev_mgmt_channel *ch, struct spdk_bdev_io *bio_to_abort);
433 :
434 : static bool claim_type_is_v2(enum spdk_bdev_claim_type type);
435 : static void bdev_desc_release_claims(struct spdk_bdev_desc *desc);
436 : static void claim_reset(struct spdk_bdev *bdev);
437 :
438 : static void bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch);
439 :
440 : static bool bdev_io_should_split(struct spdk_bdev_io *bdev_io);
441 :
442 : #define bdev_get_ext_io_opt(opts, field, defval) \
443 : ((opts) != NULL ? SPDK_GET_FIELD(opts, field, defval) : (defval))
444 :
445 : static inline void
446 671 : bdev_ch_add_to_io_submitted(struct spdk_bdev_io *bdev_io)
447 : {
448 671 : TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
449 671 : bdev_io->internal.ch->queue_depth++;
450 671 : }
451 :
452 : static inline void
453 671 : bdev_ch_remove_from_io_submitted(struct spdk_bdev_io *bdev_io)
454 : {
455 671 : TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
456 671 : bdev_io->internal.ch->queue_depth--;
457 671 : }
458 :
459 : void
460 16 : spdk_bdev_get_opts(struct spdk_bdev_opts *opts, size_t opts_size)
461 : {
462 16 : if (!opts) {
463 0 : SPDK_ERRLOG("opts should not be NULL\n");
464 0 : return;
465 : }
466 :
467 16 : if (!opts_size) {
468 0 : SPDK_ERRLOG("opts_size should not be zero value\n");
469 0 : return;
470 : }
471 :
472 16 : opts->opts_size = opts_size;
473 :
474 : #define SET_FIELD(field) \
475 : if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts_size) { \
476 : opts->field = g_bdev_opts.field; \
477 : } \
478 :
479 16 : SET_FIELD(bdev_io_pool_size);
480 16 : SET_FIELD(bdev_io_cache_size);
481 16 : SET_FIELD(bdev_auto_examine);
482 16 : SET_FIELD(iobuf_small_cache_size);
483 16 : SET_FIELD(iobuf_large_cache_size);
484 :
485 : /* Do not remove this statement, you should always update this statement when you adding a new field,
486 : * and do not forget to add the SET_FIELD statement for your added field. */
487 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_opts) == 32, "Incorrect size");
488 :
489 : #undef SET_FIELD
490 : }
491 :
492 : int
493 17 : spdk_bdev_set_opts(struct spdk_bdev_opts *opts)
494 : {
495 : uint32_t min_pool_size;
496 :
497 17 : if (!opts) {
498 0 : SPDK_ERRLOG("opts cannot be NULL\n");
499 0 : return -1;
500 : }
501 :
502 17 : if (!opts->opts_size) {
503 1 : SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
504 1 : return -1;
505 : }
506 :
507 : /*
508 : * Add 1 to the thread count to account for the extra mgmt_ch that gets created during subsystem
509 : * initialization. A second mgmt_ch will be created on the same thread when the application starts
510 : * but before the deferred put_io_channel event is executed for the first mgmt_ch.
511 : */
512 16 : min_pool_size = opts->bdev_io_cache_size * (spdk_thread_get_count() + 1);
513 16 : if (opts->bdev_io_pool_size < min_pool_size) {
514 0 : SPDK_ERRLOG("bdev_io_pool_size %" PRIu32 " is not compatible with bdev_io_cache_size %" PRIu32
515 : " and %" PRIu32 " threads\n", opts->bdev_io_pool_size, opts->bdev_io_cache_size,
516 : spdk_thread_get_count());
517 0 : SPDK_ERRLOG("bdev_io_pool_size must be at least %" PRIu32 "\n", min_pool_size);
518 0 : return -1;
519 : }
520 :
521 : #define SET_FIELD(field) \
522 : if (offsetof(struct spdk_bdev_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
523 : g_bdev_opts.field = opts->field; \
524 : } \
525 :
526 16 : SET_FIELD(bdev_io_pool_size);
527 16 : SET_FIELD(bdev_io_cache_size);
528 16 : SET_FIELD(bdev_auto_examine);
529 16 : SET_FIELD(iobuf_small_cache_size);
530 16 : SET_FIELD(iobuf_large_cache_size);
531 :
532 16 : g_bdev_opts.opts_size = opts->opts_size;
533 :
534 : #undef SET_FIELD
535 :
536 16 : return 0;
537 : }
538 :
539 : static struct spdk_bdev *
540 153 : bdev_get_by_name(const char *bdev_name)
541 : {
542 153 : struct spdk_bdev_name find;
543 : struct spdk_bdev_name *res;
544 :
545 153 : find.name = (char *)bdev_name;
546 153 : res = RB_FIND(bdev_name_tree, &g_bdev_mgr.bdev_names, &find);
547 153 : if (res != NULL) {
548 146 : return res->bdev;
549 : }
550 :
551 7 : return NULL;
552 : }
553 :
554 : struct spdk_bdev *
555 19 : spdk_bdev_get_by_name(const char *bdev_name)
556 : {
557 : struct spdk_bdev *bdev;
558 :
559 19 : spdk_spin_lock(&g_bdev_mgr.spinlock);
560 19 : bdev = bdev_get_by_name(bdev_name);
561 19 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
562 :
563 19 : return bdev;
564 : }
565 :
566 : struct bdev_io_status_string {
567 : enum spdk_bdev_io_status status;
568 : const char *str;
569 : };
570 :
571 : static const struct bdev_io_status_string bdev_io_status_strings[] = {
572 : { SPDK_BDEV_IO_STATUS_AIO_ERROR, "aio_error" },
573 : { SPDK_BDEV_IO_STATUS_ABORTED, "aborted" },
574 : { SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED, "first_fused_failed" },
575 : { SPDK_BDEV_IO_STATUS_MISCOMPARE, "miscompare" },
576 : { SPDK_BDEV_IO_STATUS_NOMEM, "nomem" },
577 : { SPDK_BDEV_IO_STATUS_SCSI_ERROR, "scsi_error" },
578 : { SPDK_BDEV_IO_STATUS_NVME_ERROR, "nvme_error" },
579 : { SPDK_BDEV_IO_STATUS_FAILED, "failed" },
580 : { SPDK_BDEV_IO_STATUS_PENDING, "pending" },
581 : { SPDK_BDEV_IO_STATUS_SUCCESS, "success" },
582 : };
583 :
584 : static const char *
585 0 : bdev_io_status_get_string(enum spdk_bdev_io_status status)
586 : {
587 : uint32_t i;
588 :
589 0 : for (i = 0; i < SPDK_COUNTOF(bdev_io_status_strings); i++) {
590 0 : if (bdev_io_status_strings[i].status == status) {
591 0 : return bdev_io_status_strings[i].str;
592 : }
593 : }
594 :
595 0 : return "reserved";
596 : }
597 :
598 : struct spdk_bdev_wait_for_examine_ctx {
599 : struct spdk_poller *poller;
600 : spdk_bdev_wait_for_examine_cb cb_fn;
601 : void *cb_arg;
602 : };
603 :
604 : static bool bdev_module_all_actions_completed(void);
605 :
606 : static int
607 201 : bdev_wait_for_examine_cb(void *arg)
608 : {
609 201 : struct spdk_bdev_wait_for_examine_ctx *ctx = arg;
610 :
611 201 : if (!bdev_module_all_actions_completed()) {
612 0 : return SPDK_POLLER_IDLE;
613 : }
614 :
615 201 : spdk_poller_unregister(&ctx->poller);
616 201 : ctx->cb_fn(ctx->cb_arg);
617 201 : free(ctx);
618 :
619 201 : return SPDK_POLLER_BUSY;
620 : }
621 :
622 : int
623 201 : spdk_bdev_wait_for_examine(spdk_bdev_wait_for_examine_cb cb_fn, void *cb_arg)
624 : {
625 : struct spdk_bdev_wait_for_examine_ctx *ctx;
626 :
627 201 : ctx = calloc(1, sizeof(*ctx));
628 201 : if (ctx == NULL) {
629 0 : return -ENOMEM;
630 : }
631 201 : ctx->cb_fn = cb_fn;
632 201 : ctx->cb_arg = cb_arg;
633 201 : ctx->poller = SPDK_POLLER_REGISTER(bdev_wait_for_examine_cb, ctx, 0);
634 :
635 201 : return 0;
636 : }
637 :
638 : struct spdk_bdev_examine_item {
639 : char *name;
640 : TAILQ_ENTRY(spdk_bdev_examine_item) link;
641 : };
642 :
643 : TAILQ_HEAD(spdk_bdev_examine_allowlist, spdk_bdev_examine_item);
644 :
645 : struct spdk_bdev_examine_allowlist g_bdev_examine_allowlist = TAILQ_HEAD_INITIALIZER(
646 : g_bdev_examine_allowlist);
647 :
648 : static inline bool
649 20 : bdev_examine_allowlist_check(const char *name)
650 : {
651 : struct spdk_bdev_examine_item *item;
652 20 : TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
653 3 : if (strcmp(name, item->name) == 0) {
654 3 : return true;
655 : }
656 : }
657 17 : return false;
658 : }
659 :
660 : static inline void
661 254 : bdev_examine_allowlist_remove(const char *name)
662 : {
663 : struct spdk_bdev_examine_item *item;
664 254 : TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
665 3 : if (strcmp(name, item->name) == 0) {
666 3 : TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
667 3 : free(item->name);
668 3 : free(item);
669 3 : break;
670 : }
671 : }
672 254 : }
673 :
674 : static inline void
675 68 : bdev_examine_allowlist_free(void)
676 : {
677 : struct spdk_bdev_examine_item *item;
678 68 : while (!TAILQ_EMPTY(&g_bdev_examine_allowlist)) {
679 0 : item = TAILQ_FIRST(&g_bdev_examine_allowlist);
680 0 : TAILQ_REMOVE(&g_bdev_examine_allowlist, item, link);
681 0 : free(item->name);
682 0 : free(item);
683 : }
684 68 : }
685 :
686 : static inline bool
687 10 : bdev_in_examine_allowlist(struct spdk_bdev *bdev)
688 : {
689 : struct spdk_bdev_alias *tmp;
690 10 : if (bdev_examine_allowlist_check(bdev->name)) {
691 3 : return true;
692 : }
693 14 : TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
694 7 : if (bdev_examine_allowlist_check(tmp->alias.name)) {
695 0 : return true;
696 : }
697 : }
698 7 : return false;
699 : }
700 :
701 : static inline bool
702 131 : bdev_ok_to_examine(struct spdk_bdev *bdev)
703 : {
704 : /* Some bdevs may not support the READ command.
705 : * Do not try to examine them.
706 : */
707 131 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_READ)) {
708 0 : return false;
709 : }
710 :
711 131 : if (g_bdev_opts.bdev_auto_examine) {
712 121 : return true;
713 : } else {
714 10 : return bdev_in_examine_allowlist(bdev);
715 : }
716 : }
717 :
718 : static void
719 131 : bdev_examine(struct spdk_bdev *bdev)
720 : {
721 : struct spdk_bdev_module *module;
722 : struct spdk_bdev_module_claim *claim, *tmpclaim;
723 : uint32_t action;
724 :
725 131 : if (!bdev_ok_to_examine(bdev)) {
726 7 : return;
727 : }
728 :
729 506 : TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
730 382 : if (module->examine_config) {
731 258 : spdk_spin_lock(&module->internal.spinlock);
732 258 : action = module->internal.action_in_progress;
733 258 : module->internal.action_in_progress++;
734 258 : spdk_spin_unlock(&module->internal.spinlock);
735 258 : module->examine_config(bdev);
736 258 : if (action != module->internal.action_in_progress) {
737 0 : SPDK_ERRLOG("examine_config for module %s did not call "
738 : "spdk_bdev_module_examine_done()\n", module->name);
739 : }
740 : }
741 : }
742 :
743 124 : spdk_spin_lock(&bdev->internal.spinlock);
744 :
745 124 : switch (bdev->internal.claim_type) {
746 116 : case SPDK_BDEV_CLAIM_NONE:
747 : /* Examine by all bdev modules */
748 466 : TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
749 350 : if (module->examine_disk) {
750 225 : spdk_spin_lock(&module->internal.spinlock);
751 225 : module->internal.action_in_progress++;
752 225 : spdk_spin_unlock(&module->internal.spinlock);
753 225 : spdk_spin_unlock(&bdev->internal.spinlock);
754 225 : module->examine_disk(bdev);
755 225 : spdk_spin_lock(&bdev->internal.spinlock);
756 : }
757 : }
758 116 : break;
759 1 : case SPDK_BDEV_CLAIM_EXCL_WRITE:
760 : /* Examine by the one bdev module with a v1 claim */
761 1 : module = bdev->internal.claim.v1.module;
762 1 : if (module->examine_disk) {
763 1 : spdk_spin_lock(&module->internal.spinlock);
764 1 : module->internal.action_in_progress++;
765 1 : spdk_spin_unlock(&module->internal.spinlock);
766 1 : spdk_spin_unlock(&bdev->internal.spinlock);
767 1 : module->examine_disk(bdev);
768 1 : return;
769 : }
770 0 : break;
771 7 : default:
772 : /* Examine by all bdev modules with a v2 claim */
773 7 : assert(claim_type_is_v2(bdev->internal.claim_type));
774 : /*
775 : * Removal of tailq nodes while iterating can cause the iteration to jump out of the
776 : * list, perhaps accessing freed memory. Without protection, this could happen
777 : * while the lock is dropped during the examine callback.
778 : */
779 7 : bdev->internal.examine_in_progress++;
780 :
781 16 : TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) {
782 9 : module = claim->module;
783 :
784 9 : if (module == NULL) {
785 : /* This is a vestigial claim, held by examine_count */
786 0 : continue;
787 : }
788 :
789 9 : if (module->examine_disk == NULL) {
790 0 : continue;
791 : }
792 :
793 9 : spdk_spin_lock(&module->internal.spinlock);
794 9 : module->internal.action_in_progress++;
795 9 : spdk_spin_unlock(&module->internal.spinlock);
796 :
797 : /* Call examine_disk without holding internal.spinlock. */
798 9 : spdk_spin_unlock(&bdev->internal.spinlock);
799 9 : module->examine_disk(bdev);
800 9 : spdk_spin_lock(&bdev->internal.spinlock);
801 : }
802 :
803 7 : assert(bdev->internal.examine_in_progress > 0);
804 7 : bdev->internal.examine_in_progress--;
805 7 : if (bdev->internal.examine_in_progress == 0) {
806 : /* Remove any claims that were released during examine_disk */
807 16 : TAILQ_FOREACH_SAFE(claim, &bdev->internal.claim.v2.claims, link, tmpclaim) {
808 9 : if (claim->desc != NULL) {
809 9 : continue;
810 : }
811 :
812 0 : TAILQ_REMOVE(&bdev->internal.claim.v2.claims, claim, link);
813 0 : free(claim);
814 : }
815 7 : if (TAILQ_EMPTY(&bdev->internal.claim.v2.claims)) {
816 0 : claim_reset(bdev);
817 : }
818 : }
819 : }
820 :
821 123 : spdk_spin_unlock(&bdev->internal.spinlock);
822 : }
823 :
824 : int
825 4 : spdk_bdev_examine(const char *name)
826 : {
827 : struct spdk_bdev *bdev;
828 : struct spdk_bdev_examine_item *item;
829 4 : struct spdk_thread *thread = spdk_get_thread();
830 :
831 4 : if (spdk_unlikely(!spdk_thread_is_app_thread(thread))) {
832 1 : SPDK_ERRLOG("Cannot examine bdev %s on thread %p (%s)\n", name, thread,
833 : thread ? spdk_thread_get_name(thread) : "null");
834 1 : return -EINVAL;
835 : }
836 :
837 3 : if (g_bdev_opts.bdev_auto_examine) {
838 0 : SPDK_ERRLOG("Manual examine is not allowed if auto examine is enabled\n");
839 0 : return -EINVAL;
840 : }
841 :
842 3 : if (bdev_examine_allowlist_check(name)) {
843 0 : SPDK_ERRLOG("Duplicate bdev name for manual examine: %s\n", name);
844 0 : return -EEXIST;
845 : }
846 :
847 3 : item = calloc(1, sizeof(*item));
848 3 : if (!item) {
849 0 : return -ENOMEM;
850 : }
851 3 : item->name = strdup(name);
852 3 : if (!item->name) {
853 0 : free(item);
854 0 : return -ENOMEM;
855 : }
856 3 : TAILQ_INSERT_TAIL(&g_bdev_examine_allowlist, item, link);
857 :
858 3 : bdev = spdk_bdev_get_by_name(name);
859 3 : if (bdev) {
860 3 : bdev_examine(bdev);
861 : }
862 3 : return 0;
863 : }
864 :
865 : static inline void
866 0 : bdev_examine_allowlist_config_json(struct spdk_json_write_ctx *w)
867 : {
868 : struct spdk_bdev_examine_item *item;
869 0 : TAILQ_FOREACH(item, &g_bdev_examine_allowlist, link) {
870 0 : spdk_json_write_object_begin(w);
871 0 : spdk_json_write_named_string(w, "method", "bdev_examine");
872 0 : spdk_json_write_named_object_begin(w, "params");
873 0 : spdk_json_write_named_string(w, "name", item->name);
874 0 : spdk_json_write_object_end(w);
875 0 : spdk_json_write_object_end(w);
876 : }
877 0 : }
878 :
879 : struct spdk_bdev *
880 1 : spdk_bdev_first(void)
881 : {
882 : struct spdk_bdev *bdev;
883 :
884 1 : bdev = TAILQ_FIRST(&g_bdev_mgr.bdevs);
885 1 : if (bdev) {
886 1 : SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
887 : }
888 :
889 1 : return bdev;
890 : }
891 :
892 : struct spdk_bdev *
893 8 : spdk_bdev_next(struct spdk_bdev *prev)
894 : {
895 : struct spdk_bdev *bdev;
896 :
897 8 : bdev = TAILQ_NEXT(prev, internal.link);
898 8 : if (bdev) {
899 7 : SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
900 : }
901 :
902 8 : return bdev;
903 : }
904 :
905 : static struct spdk_bdev *
906 6 : _bdev_next_leaf(struct spdk_bdev *bdev)
907 : {
908 9 : while (bdev != NULL) {
909 8 : if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
910 5 : return bdev;
911 : } else {
912 3 : bdev = TAILQ_NEXT(bdev, internal.link);
913 : }
914 : }
915 :
916 1 : return bdev;
917 : }
918 :
919 : struct spdk_bdev *
920 1 : spdk_bdev_first_leaf(void)
921 : {
922 : struct spdk_bdev *bdev;
923 :
924 1 : bdev = _bdev_next_leaf(TAILQ_FIRST(&g_bdev_mgr.bdevs));
925 :
926 1 : if (bdev) {
927 1 : SPDK_DEBUGLOG(bdev, "Starting bdev iteration at %s\n", bdev->name);
928 : }
929 :
930 1 : return bdev;
931 : }
932 :
933 : struct spdk_bdev *
934 5 : spdk_bdev_next_leaf(struct spdk_bdev *prev)
935 : {
936 : struct spdk_bdev *bdev;
937 :
938 5 : bdev = _bdev_next_leaf(TAILQ_NEXT(prev, internal.link));
939 :
940 5 : if (bdev) {
941 4 : SPDK_DEBUGLOG(bdev, "Continuing bdev iteration at %s\n", bdev->name);
942 : }
943 :
944 5 : return bdev;
945 : }
946 :
947 : static inline bool
948 816 : bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
949 : {
950 816 : return bdev_io->internal.f.has_memory_domain;
951 : }
952 :
953 : static inline bool
954 1555 : bdev_io_use_accel_sequence(struct spdk_bdev_io *bdev_io)
955 : {
956 1555 : return bdev_io->internal.f.has_accel_sequence;
957 : }
958 :
959 : static inline void
960 7 : bdev_queue_nomem_io_head(struct spdk_bdev_shared_resource *shared_resource,
961 : struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
962 : {
963 : /* Wait for some of the outstanding I/O to complete before we retry any of the nomem_io.
964 : * Normally we will wait for NOMEM_THRESHOLD_COUNT I/O to complete but for low queue depth
965 : * channels we will instead wait for half to complete.
966 : */
967 7 : shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
968 : (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
969 :
970 7 : assert(state != BDEV_IO_RETRY_STATE_INVALID);
971 7 : bdev_io->internal.retry_state = state;
972 7 : TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
973 7 : }
974 :
975 : static inline void
976 43 : bdev_queue_nomem_io_tail(struct spdk_bdev_shared_resource *shared_resource,
977 : struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
978 : {
979 : /* We only queue IOs at the end of the nomem_io queue if they're submitted by the user while
980 : * the queue isn't empty, so we don't need to update the nomem_threshold here */
981 43 : assert(!TAILQ_EMPTY(&shared_resource->nomem_io));
982 :
983 43 : assert(state != BDEV_IO_RETRY_STATE_INVALID);
984 43 : bdev_io->internal.retry_state = state;
985 43 : TAILQ_INSERT_TAIL(&shared_resource->nomem_io, bdev_io, internal.link);
986 43 : }
987 :
988 : void
989 16 : spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
990 : {
991 : struct iovec *iovs;
992 :
993 16 : if (bdev_io->u.bdev.iovs == NULL) {
994 3 : bdev_io->u.bdev.iovs = &bdev_io->iov;
995 3 : bdev_io->u.bdev.iovcnt = 1;
996 : }
997 :
998 16 : iovs = bdev_io->u.bdev.iovs;
999 :
1000 16 : assert(iovs != NULL);
1001 16 : assert(bdev_io->u.bdev.iovcnt >= 1);
1002 :
1003 16 : iovs[0].iov_base = buf;
1004 16 : iovs[0].iov_len = len;
1005 16 : }
1006 :
1007 : void
1008 3 : spdk_bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
1009 : {
1010 3 : assert((len / spdk_bdev_get_md_size(bdev_io->bdev)) >= bdev_io->u.bdev.num_blocks);
1011 3 : bdev_io->u.bdev.md_buf = md_buf;
1012 3 : }
1013 :
1014 : static bool
1015 167 : _is_buf_allocated(const struct iovec *iovs)
1016 : {
1017 167 : if (iovs == NULL) {
1018 6 : return false;
1019 : }
1020 :
1021 161 : return iovs[0].iov_base != NULL;
1022 : }
1023 :
1024 : static bool
1025 50 : _are_iovs_aligned(struct iovec *iovs, int iovcnt, uint32_t alignment)
1026 : {
1027 : int i;
1028 : uintptr_t iov_base;
1029 :
1030 50 : if (spdk_likely(alignment == 1)) {
1031 21 : return true;
1032 : }
1033 :
1034 36 : for (i = 0; i < iovcnt; i++) {
1035 29 : iov_base = (uintptr_t)iovs[i].iov_base;
1036 29 : if ((iov_base & (alignment - 1)) != 0) {
1037 22 : return false;
1038 : }
1039 : }
1040 :
1041 7 : return true;
1042 : }
1043 :
1044 : static inline bool
1045 856 : bdev_io_needs_sequence_exec(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
1046 : {
1047 856 : if (!bdev_io_use_accel_sequence(bdev_io)) {
1048 856 : return false;
1049 : }
1050 :
1051 : /* For now, we don't allow splitting IOs with an accel sequence and will treat them as if
1052 : * bdev module didn't support accel sequences */
1053 0 : return !desc->accel_sequence_supported[bdev_io->type] || bdev_io->internal.f.split;
1054 : }
1055 :
1056 : static inline void
1057 592 : bdev_io_increment_outstanding(struct spdk_bdev_channel *bdev_ch,
1058 : struct spdk_bdev_shared_resource *shared_resource)
1059 : {
1060 592 : bdev_ch->io_outstanding++;
1061 592 : shared_resource->io_outstanding++;
1062 592 : }
1063 :
1064 : static inline void
1065 592 : bdev_io_decrement_outstanding(struct spdk_bdev_channel *bdev_ch,
1066 : struct spdk_bdev_shared_resource *shared_resource)
1067 : {
1068 592 : assert(bdev_ch->io_outstanding > 0);
1069 592 : assert(shared_resource->io_outstanding > 0);
1070 592 : bdev_ch->io_outstanding--;
1071 592 : shared_resource->io_outstanding--;
1072 592 : }
1073 :
1074 : static void
1075 0 : bdev_io_submit_sequence_cb(void *ctx, int status)
1076 : {
1077 0 : struct spdk_bdev_io *bdev_io = ctx;
1078 :
1079 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1080 :
1081 0 : bdev_io->u.bdev.accel_sequence = NULL;
1082 0 : bdev_io->internal.f.has_accel_sequence = false;
1083 :
1084 0 : if (spdk_unlikely(status != 0)) {
1085 0 : SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
1086 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1087 0 : bdev_io_complete_unsubmitted(bdev_io);
1088 0 : return;
1089 : }
1090 :
1091 0 : bdev_io_submit(bdev_io);
1092 : }
1093 :
1094 : static void
1095 0 : bdev_io_exec_sequence_cb(void *ctx, int status)
1096 : {
1097 0 : struct spdk_bdev_io *bdev_io = ctx;
1098 0 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1099 :
1100 0 : TAILQ_REMOVE(&bdev_io->internal.ch->io_accel_exec, bdev_io, internal.link);
1101 0 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1102 :
1103 0 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1104 0 : bdev_ch_retry_io(ch);
1105 : }
1106 :
1107 0 : bdev_io->internal.data_transfer_cpl(bdev_io, status);
1108 0 : }
1109 :
1110 : static void
1111 0 : bdev_io_exec_sequence(struct spdk_bdev_io *bdev_io, void (*cb_fn)(void *ctx, int status))
1112 : {
1113 0 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1114 :
1115 0 : assert(bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
1116 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE || bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1117 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1118 :
1119 : /* Since the operations are appended during submission, they're in the opposite order than
1120 : * how we want to execute them for reads (i.e. we need to execute the most recently added
1121 : * operation first), so reverse the sequence before executing it.
1122 : */
1123 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1124 0 : spdk_accel_sequence_reverse(bdev_io->internal.accel_sequence);
1125 : }
1126 :
1127 0 : TAILQ_INSERT_TAIL(&bdev_io->internal.ch->io_accel_exec, bdev_io, internal.link);
1128 0 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1129 0 : bdev_io->internal.data_transfer_cpl = cb_fn;
1130 :
1131 0 : spdk_accel_sequence_finish(bdev_io->internal.accel_sequence,
1132 : bdev_io_exec_sequence_cb, bdev_io);
1133 0 : }
1134 :
1135 : static void
1136 42 : bdev_io_get_buf_complete(struct spdk_bdev_io *bdev_io, bool status)
1137 : {
1138 42 : struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
1139 : void *buf;
1140 :
1141 42 : if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
1142 0 : buf = bdev_io->internal.buf.ptr;
1143 0 : bdev_io->internal.buf.ptr = NULL;
1144 0 : bdev_io->internal.f.has_buf = false;
1145 0 : bdev_io->internal.get_aux_buf_cb(ch, bdev_io, buf);
1146 0 : bdev_io->internal.get_aux_buf_cb = NULL;
1147 : } else {
1148 42 : assert(bdev_io->internal.get_buf_cb != NULL);
1149 42 : bdev_io->internal.get_buf_cb(ch, bdev_io, status);
1150 42 : bdev_io->internal.get_buf_cb = NULL;
1151 : }
1152 42 : }
1153 :
1154 : static void
1155 4 : _bdev_io_pull_buffer_cpl(void *ctx, int rc)
1156 : {
1157 4 : struct spdk_bdev_io *bdev_io = ctx;
1158 :
1159 4 : if (rc) {
1160 0 : SPDK_ERRLOG("Set bounce buffer failed with rc %d\n", rc);
1161 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1162 : }
1163 4 : bdev_io_get_buf_complete(bdev_io, !rc);
1164 4 : }
1165 :
1166 : static void
1167 2 : bdev_io_pull_md_buf_done(void *ctx, int status)
1168 : {
1169 2 : struct spdk_bdev_io *bdev_io = ctx;
1170 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1171 :
1172 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1173 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1174 :
1175 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1176 0 : bdev_ch_retry_io(ch);
1177 : }
1178 :
1179 2 : assert(bdev_io->internal.data_transfer_cpl);
1180 2 : bdev_io->internal.data_transfer_cpl(bdev_io, status);
1181 2 : }
1182 :
1183 : static void
1184 4 : bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
1185 : {
1186 4 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1187 4 : int rc = 0;
1188 :
1189 4 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1190 2 : assert(bdev_io->internal.f.has_bounce_buf);
1191 2 : if (bdev_io_use_memory_domain(bdev_io)) {
1192 2 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1193 2 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1194 2 : rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
1195 : bdev_io->internal.memory_domain_ctx,
1196 : &bdev_io->internal.bounce_buf.orig_md_iov, 1,
1197 : &bdev_io->internal.bounce_buf.md_iov, 1,
1198 : bdev_io_pull_md_buf_done, bdev_io);
1199 2 : if (rc == 0) {
1200 : /* Continue to submit IO in completion callback */
1201 2 : return;
1202 : }
1203 0 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1204 0 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1205 0 : if (rc != -ENOMEM) {
1206 0 : SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
1207 : spdk_memory_domain_get_dma_device_id(
1208 : bdev_io->internal.memory_domain), rc);
1209 : }
1210 : } else {
1211 0 : memcpy(bdev_io->internal.bounce_buf.md_iov.iov_base,
1212 0 : bdev_io->internal.bounce_buf.orig_md_iov.iov_base,
1213 : bdev_io->internal.bounce_buf.orig_md_iov.iov_len);
1214 : }
1215 : }
1216 :
1217 2 : if (spdk_unlikely(rc == -ENOMEM)) {
1218 0 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL_MD);
1219 : } else {
1220 2 : assert(bdev_io->internal.data_transfer_cpl);
1221 2 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1222 : }
1223 : }
1224 :
1225 : static void
1226 4 : _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
1227 : {
1228 4 : assert(bdev_io->internal.f.has_bounce_buf);
1229 :
1230 : /* save original md_buf */
1231 4 : bdev_io->internal.bounce_buf.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
1232 4 : bdev_io->internal.bounce_buf.orig_md_iov.iov_len = len;
1233 4 : bdev_io->internal.bounce_buf.md_iov.iov_base = md_buf;
1234 4 : bdev_io->internal.bounce_buf.md_iov.iov_len = len;
1235 : /* set bounce md_buf */
1236 4 : bdev_io->u.bdev.md_buf = md_buf;
1237 :
1238 4 : bdev_io_pull_md_buf(bdev_io);
1239 4 : }
1240 :
1241 : static void
1242 42 : _bdev_io_set_md_buf(struct spdk_bdev_io *bdev_io)
1243 : {
1244 42 : struct spdk_bdev *bdev = bdev_io->bdev;
1245 : uint64_t md_len;
1246 : void *buf;
1247 :
1248 42 : if (spdk_bdev_is_md_separate(bdev)) {
1249 7 : assert(!bdev_io_use_accel_sequence(bdev_io));
1250 :
1251 7 : buf = (char *)bdev_io->u.bdev.iovs[0].iov_base + bdev_io->u.bdev.iovs[0].iov_len;
1252 7 : md_len = bdev_io->u.bdev.num_blocks * bdev->md_len;
1253 :
1254 7 : assert(((uintptr_t)buf & (spdk_bdev_get_buf_align(bdev) - 1)) == 0);
1255 :
1256 7 : if (bdev_io->u.bdev.md_buf != NULL) {
1257 4 : _bdev_io_pull_bounce_md_buf(bdev_io, buf, md_len);
1258 4 : return;
1259 : } else {
1260 3 : spdk_bdev_io_set_md_buf(bdev_io, buf, md_len);
1261 : }
1262 : }
1263 :
1264 38 : bdev_io_get_buf_complete(bdev_io, true);
1265 : }
1266 :
1267 : static inline void
1268 26 : bdev_io_pull_data_done(struct spdk_bdev_io *bdev_io, int rc)
1269 : {
1270 26 : if (rc) {
1271 0 : SPDK_ERRLOG("Failed to get data buffer\n");
1272 0 : assert(bdev_io->internal.data_transfer_cpl);
1273 0 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1274 0 : return;
1275 : }
1276 :
1277 26 : _bdev_io_set_md_buf(bdev_io);
1278 : }
1279 :
1280 : static void
1281 2 : bdev_io_pull_data_done_and_track(void *ctx, int status)
1282 : {
1283 2 : struct spdk_bdev_io *bdev_io = ctx;
1284 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1285 :
1286 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1287 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1288 :
1289 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1290 0 : bdev_ch_retry_io(ch);
1291 : }
1292 :
1293 2 : bdev_io_pull_data_done(bdev_io, status);
1294 2 : }
1295 :
1296 : static void
1297 27 : bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
1298 : {
1299 27 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1300 27 : int rc = 0;
1301 :
1302 : /* If we need to exec an accel sequence or the IO uses a memory domain buffer and has a
1303 : * sequence, append a copy operation making accel change the src/dst buffers of the previous
1304 : * operation */
1305 54 : if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io) ||
1306 27 : (bdev_io_use_accel_sequence(bdev_io) && bdev_io_use_memory_domain(bdev_io))) {
1307 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1308 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1309 0 : assert(bdev_io->internal.f.has_bounce_buf);
1310 0 : rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1311 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1312 : NULL, NULL,
1313 : bdev_io->internal.bounce_buf.orig_iovs,
1314 0 : bdev_io->internal.bounce_buf.orig_iovcnt,
1315 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
1316 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
1317 : NULL, NULL);
1318 : } else {
1319 : /* We need to reverse the src/dst for reads */
1320 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1321 0 : assert(bdev_io_use_accel_sequence(bdev_io));
1322 0 : assert(bdev_io->internal.f.has_bounce_buf);
1323 0 : rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
1324 : bdev_io->internal.bounce_buf.orig_iovs,
1325 0 : bdev_io->internal.bounce_buf.orig_iovcnt,
1326 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
1327 0 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
1328 0 : bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
1329 : NULL, NULL, NULL, NULL);
1330 : }
1331 :
1332 0 : if (spdk_unlikely(rc != 0 && rc != -ENOMEM)) {
1333 0 : SPDK_ERRLOG("Failed to append copy to accel sequence: %p\n",
1334 : bdev_io->internal.accel_sequence);
1335 : }
1336 27 : } else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1337 : /* if this is write path, copy data from original buffer to bounce buffer */
1338 17 : if (bdev_io_use_memory_domain(bdev_io)) {
1339 3 : assert(bdev_io->internal.f.has_bounce_buf);
1340 3 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1341 3 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1342 3 : rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
1343 : bdev_io->internal.memory_domain_ctx,
1344 : bdev_io->internal.bounce_buf.orig_iovs,
1345 3 : (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
1346 : bdev_io->u.bdev.iovs, 1,
1347 : bdev_io_pull_data_done_and_track,
1348 : bdev_io);
1349 3 : if (rc == 0) {
1350 : /* Continue to submit IO in completion callback */
1351 2 : return;
1352 : }
1353 1 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1354 1 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1355 1 : if (rc != -ENOMEM) {
1356 0 : SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
1357 : spdk_memory_domain_get_dma_device_id(
1358 : bdev_io->internal.memory_domain));
1359 : }
1360 : } else {
1361 14 : assert(bdev_io->u.bdev.iovcnt == 1);
1362 14 : assert(bdev_io->internal.f.has_bounce_buf);
1363 14 : spdk_copy_iovs_to_buf(bdev_io->u.bdev.iovs[0].iov_base,
1364 14 : bdev_io->u.bdev.iovs[0].iov_len,
1365 : bdev_io->internal.bounce_buf.orig_iovs,
1366 : bdev_io->internal.bounce_buf.orig_iovcnt);
1367 : }
1368 : }
1369 :
1370 25 : if (spdk_unlikely(rc == -ENOMEM)) {
1371 1 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL);
1372 : } else {
1373 24 : bdev_io_pull_data_done(bdev_io, rc);
1374 : }
1375 : }
1376 :
1377 : static void
1378 26 : _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
1379 : bdev_copy_bounce_buffer_cpl cpl_cb)
1380 : {
1381 26 : struct spdk_bdev_shared_resource *shared_resource = bdev_io->internal.ch->shared_resource;
1382 :
1383 26 : assert(bdev_io->internal.f.has_bounce_buf == false);
1384 :
1385 26 : bdev_io->internal.data_transfer_cpl = cpl_cb;
1386 26 : bdev_io->internal.f.has_bounce_buf = true;
1387 : /* save original iovec */
1388 26 : bdev_io->internal.bounce_buf.orig_iovs = bdev_io->u.bdev.iovs;
1389 26 : bdev_io->internal.bounce_buf.orig_iovcnt = bdev_io->u.bdev.iovcnt;
1390 : /* zero the other data members */
1391 26 : bdev_io->internal.bounce_buf.iov.iov_base = NULL;
1392 26 : bdev_io->internal.bounce_buf.md_iov.iov_base = NULL;
1393 26 : bdev_io->internal.bounce_buf.orig_md_iov.iov_base = NULL;
1394 : /* set bounce iov */
1395 26 : bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_buf.iov;
1396 26 : bdev_io->u.bdev.iovcnt = 1;
1397 : /* set bounce buffer for this operation */
1398 26 : bdev_io->u.bdev.iovs[0].iov_base = buf;
1399 26 : bdev_io->u.bdev.iovs[0].iov_len = len;
1400 : /* Now we use 1 iov, the split condition could have been changed */
1401 26 : bdev_io->internal.f.split = bdev_io_should_split(bdev_io);
1402 :
1403 26 : if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1404 0 : bdev_queue_nomem_io_tail(shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PULL);
1405 : } else {
1406 26 : bdev_io_pull_data(bdev_io);
1407 : }
1408 26 : }
1409 :
1410 : static void
1411 42 : _bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t len)
1412 : {
1413 42 : struct spdk_bdev *bdev = bdev_io->bdev;
1414 : bool buf_allocated;
1415 : uint64_t alignment;
1416 : void *aligned_buf;
1417 :
1418 42 : bdev_io->internal.buf.ptr = buf;
1419 42 : bdev_io->internal.f.has_buf = true;
1420 :
1421 42 : if (spdk_unlikely(bdev_io->internal.get_aux_buf_cb != NULL)) {
1422 0 : bdev_io_get_buf_complete(bdev_io, true);
1423 0 : return;
1424 : }
1425 :
1426 42 : alignment = spdk_bdev_get_buf_align(bdev);
1427 42 : buf_allocated = _is_buf_allocated(bdev_io->u.bdev.iovs);
1428 42 : aligned_buf = (void *)(((uintptr_t)buf + (alignment - 1)) & ~(alignment - 1));
1429 :
1430 42 : if (buf_allocated) {
1431 26 : _bdev_io_pull_bounce_data_buf(bdev_io, aligned_buf, len, _bdev_io_pull_buffer_cpl);
1432 : /* Continue in completion callback */
1433 26 : return;
1434 : } else {
1435 16 : spdk_bdev_io_set_buf(bdev_io, aligned_buf, len);
1436 : }
1437 :
1438 16 : _bdev_io_set_md_buf(bdev_io);
1439 : }
1440 :
1441 : static inline uint64_t
1442 84 : bdev_io_get_max_buf_len(struct spdk_bdev_io *bdev_io, uint64_t len)
1443 : {
1444 84 : struct spdk_bdev *bdev = bdev_io->bdev;
1445 : uint64_t md_len, alignment;
1446 :
1447 84 : md_len = spdk_bdev_is_md_separate(bdev) ? bdev_io->u.bdev.num_blocks * bdev->md_len : 0;
1448 :
1449 : /* 1 byte alignment needs 0 byte of extra space, 64 bytes alignment needs 63 bytes of extra space, etc. */
1450 84 : alignment = spdk_bdev_get_buf_align(bdev) - 1;
1451 :
1452 84 : return len + alignment + md_len;
1453 : }
1454 :
1455 : static void
1456 42 : _bdev_io_put_buf(struct spdk_bdev_io *bdev_io, void *buf, uint64_t buf_len)
1457 : {
1458 : struct spdk_bdev_mgmt_channel *ch;
1459 :
1460 42 : ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1461 42 : spdk_iobuf_put(&ch->iobuf, buf, bdev_io_get_max_buf_len(bdev_io, buf_len));
1462 42 : }
1463 :
1464 : static void
1465 42 : bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
1466 : {
1467 42 : assert(bdev_io->internal.f.has_buf);
1468 42 : _bdev_io_put_buf(bdev_io, bdev_io->internal.buf.ptr, bdev_io->internal.buf.len);
1469 42 : bdev_io->internal.buf.ptr = NULL;
1470 42 : bdev_io->internal.f.has_buf = false;
1471 42 : }
1472 :
1473 3 : SPDK_LOG_DEPRECATION_REGISTER(spdk_bdev_io_put_aux_buf,
1474 : "spdk_bdev_io_put_aux_buf is deprecated", "v25.01", 0);
1475 :
1476 : void
1477 0 : spdk_bdev_io_put_aux_buf(struct spdk_bdev_io *bdev_io, void *buf)
1478 : {
1479 0 : uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1480 :
1481 0 : SPDK_LOG_DEPRECATED(spdk_bdev_io_put_aux_buf);
1482 :
1483 0 : assert(buf != NULL);
1484 0 : _bdev_io_put_buf(bdev_io, buf, len);
1485 0 : }
1486 :
1487 : static inline void
1488 549 : bdev_submit_request(struct spdk_bdev *bdev, struct spdk_io_channel *ioch,
1489 : struct spdk_bdev_io *bdev_io)
1490 : {
1491 : /* After a request is submitted to a bdev module, the ownership of an accel sequence
1492 : * associated with that bdev_io is transferred to the bdev module. So, clear the internal
1493 : * sequence pointer to make sure we won't touch it anymore. */
1494 549 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE ||
1495 549 : bdev_io->type == SPDK_BDEV_IO_TYPE_READ) && bdev_io->u.bdev.accel_sequence != NULL) {
1496 0 : assert(!bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io));
1497 0 : bdev_io->internal.f.has_accel_sequence = false;
1498 : }
1499 :
1500 549 : bdev->fn_table->submit_request(ioch, bdev_io);
1501 549 : }
1502 :
1503 : static inline void
1504 10 : bdev_ch_resubmit_io(struct spdk_bdev_shared_resource *shared_resource, struct spdk_bdev_io *bdev_io)
1505 : {
1506 10 : struct spdk_bdev *bdev = bdev_io->bdev;
1507 :
1508 10 : bdev_io_increment_outstanding(bdev_io->internal.ch, shared_resource);
1509 10 : bdev_io->internal.error.nvme.cdw0 = 0;
1510 10 : bdev_io->num_retries++;
1511 10 : bdev_submit_request(bdev, spdk_bdev_io_get_io_channel(bdev_io), bdev_io);
1512 10 : }
1513 :
1514 : static void
1515 63 : bdev_shared_ch_retry_io(struct spdk_bdev_shared_resource *shared_resource)
1516 : {
1517 : struct spdk_bdev_io *bdev_io;
1518 :
1519 63 : if (shared_resource->io_outstanding > shared_resource->nomem_threshold) {
1520 : /*
1521 : * Allow some more I/O to complete before retrying the nomem_io queue.
1522 : * Some drivers (such as nvme) cannot immediately take a new I/O in
1523 : * the context of a completion, because the resources for the I/O are
1524 : * not released until control returns to the bdev poller. Also, we
1525 : * may require several small I/O to complete before a larger I/O
1526 : * (that requires splitting) can be submitted.
1527 : */
1528 58 : return;
1529 : }
1530 :
1531 16 : while (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1532 12 : bdev_io = TAILQ_FIRST(&shared_resource->nomem_io);
1533 12 : TAILQ_REMOVE(&shared_resource->nomem_io, bdev_io, internal.link);
1534 :
1535 12 : switch (bdev_io->internal.retry_state) {
1536 10 : case BDEV_IO_RETRY_STATE_SUBMIT:
1537 10 : bdev_ch_resubmit_io(shared_resource, bdev_io);
1538 10 : break;
1539 1 : case BDEV_IO_RETRY_STATE_PULL:
1540 1 : bdev_io_pull_data(bdev_io);
1541 1 : break;
1542 0 : case BDEV_IO_RETRY_STATE_PULL_MD:
1543 0 : bdev_io_pull_md_buf(bdev_io);
1544 0 : break;
1545 1 : case BDEV_IO_RETRY_STATE_PUSH:
1546 1 : bdev_io_push_bounce_data(bdev_io);
1547 1 : break;
1548 0 : case BDEV_IO_RETRY_STATE_PUSH_MD:
1549 0 : bdev_io_push_bounce_md_buf(bdev_io);
1550 0 : break;
1551 0 : default:
1552 0 : assert(0 && "invalid retry state");
1553 : break;
1554 : }
1555 :
1556 12 : if (bdev_io == TAILQ_FIRST(&shared_resource->nomem_io)) {
1557 : /* This IO completed again with NOMEM status, so break the loop and
1558 : * don't try anymore. Note that a bdev_io that fails with NOMEM
1559 : * always gets requeued at the front of the list, to maintain
1560 : * ordering.
1561 : */
1562 1 : break;
1563 : }
1564 : }
1565 : }
1566 :
1567 : static void
1568 63 : bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
1569 : {
1570 63 : bdev_shared_ch_retry_io(bdev_ch->shared_resource);
1571 63 : }
1572 :
1573 : static int
1574 0 : bdev_no_mem_poller(void *ctx)
1575 : {
1576 0 : struct spdk_bdev_shared_resource *shared_resource = ctx;
1577 :
1578 0 : spdk_poller_unregister(&shared_resource->nomem_poller);
1579 :
1580 0 : if (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
1581 0 : bdev_shared_ch_retry_io(shared_resource);
1582 : }
1583 : /* the retry cb may re-register the poller so double check */
1584 0 : if (!TAILQ_EMPTY(&shared_resource->nomem_io) &&
1585 0 : shared_resource->io_outstanding == 0 && shared_resource->nomem_poller == NULL) {
1586 : /* No IOs were submitted, try again */
1587 0 : shared_resource->nomem_poller = SPDK_POLLER_REGISTER(bdev_no_mem_poller, shared_resource,
1588 : SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * 10);
1589 : }
1590 :
1591 0 : return SPDK_POLLER_BUSY;
1592 : }
1593 :
1594 : static inline bool
1595 556 : _bdev_io_handle_no_mem(struct spdk_bdev_io *bdev_io, enum bdev_io_retry_state state)
1596 : {
1597 556 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
1598 556 : struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
1599 :
1600 556 : if (spdk_unlikely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM)) {
1601 5 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
1602 5 : bdev_queue_nomem_io_head(shared_resource, bdev_io, state);
1603 :
1604 5 : if (shared_resource->io_outstanding == 0 && !shared_resource->nomem_poller) {
1605 : /* Special case when we have nomem IOs and no outstanding IOs which completions
1606 : * could trigger retry of queued IOs
1607 : * Any IOs submitted may trigger retry of queued IOs. This poller handles a case when no
1608 : * new IOs submitted, e.g. qd==1 */
1609 0 : shared_resource->nomem_poller = SPDK_POLLER_REGISTER(bdev_no_mem_poller, shared_resource,
1610 : SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * 10);
1611 : }
1612 : /* If bdev module completed an I/O that has an accel sequence with NOMEM status, the
1613 : * ownership of that sequence is transferred back to the bdev layer, so we need to
1614 : * restore internal.accel_sequence to make sure that the sequence is handled
1615 : * correctly in case the I/O is later aborted. */
1616 5 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
1617 5 : bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) && bdev_io->u.bdev.accel_sequence) {
1618 0 : assert(!bdev_io_use_accel_sequence(bdev_io));
1619 0 : bdev_io->internal.f.has_accel_sequence = true;
1620 0 : bdev_io->internal.accel_sequence = bdev_io->u.bdev.accel_sequence;
1621 : }
1622 :
1623 5 : return true;
1624 : }
1625 :
1626 551 : if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
1627 63 : bdev_ch_retry_io(bdev_ch);
1628 : }
1629 :
1630 551 : return false;
1631 : }
1632 :
1633 : static void
1634 26 : _bdev_io_complete_push_bounce_done(void *ctx, int rc)
1635 : {
1636 26 : struct spdk_bdev_io *bdev_io = ctx;
1637 26 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1638 :
1639 26 : if (rc) {
1640 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1641 : }
1642 : /* We want to free the bounce buffer here since we know we're done with it (as opposed
1643 : * to waiting for the conditional free of internal.buf.ptr in spdk_bdev_free_io()).
1644 : */
1645 26 : bdev_io_put_buf(bdev_io);
1646 :
1647 26 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1648 0 : bdev_ch_retry_io(ch);
1649 : }
1650 :
1651 : /* Continue with IO completion flow */
1652 26 : bdev_io_complete(bdev_io);
1653 26 : }
1654 :
1655 : static void
1656 2 : bdev_io_push_bounce_md_buf_done(void *ctx, int rc)
1657 : {
1658 2 : struct spdk_bdev_io *bdev_io = ctx;
1659 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1660 :
1661 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1662 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1663 2 : bdev_io->internal.f.has_bounce_buf = false;
1664 :
1665 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1666 0 : bdev_ch_retry_io(ch);
1667 : }
1668 :
1669 2 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1670 2 : }
1671 :
1672 : static inline void
1673 26 : bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
1674 : {
1675 26 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1676 26 : int rc = 0;
1677 :
1678 26 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1679 26 : assert(bdev_io->internal.f.has_bounce_buf);
1680 :
1681 : /* do the same for metadata buffer */
1682 26 : if (spdk_unlikely(bdev_io->internal.bounce_buf.orig_md_iov.iov_base != NULL)) {
1683 4 : assert(spdk_bdev_is_md_separate(bdev_io->bdev));
1684 :
1685 4 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1686 2 : if (bdev_io_use_memory_domain(bdev_io)) {
1687 2 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1688 2 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1689 : /* If memory domain is used then we need to call async push function */
1690 2 : rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
1691 : bdev_io->internal.memory_domain_ctx,
1692 : &bdev_io->internal.bounce_buf.orig_md_iov,
1693 2 : (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
1694 : &bdev_io->internal.bounce_buf.md_iov, 1,
1695 : bdev_io_push_bounce_md_buf_done,
1696 : bdev_io);
1697 2 : if (rc == 0) {
1698 : /* Continue IO completion in async callback */
1699 2 : return;
1700 : }
1701 0 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1702 0 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1703 0 : if (rc != -ENOMEM) {
1704 0 : SPDK_ERRLOG("Failed to push md to memory domain %s\n",
1705 : spdk_memory_domain_get_dma_device_id(
1706 : bdev_io->internal.memory_domain));
1707 : }
1708 : } else {
1709 0 : memcpy(bdev_io->internal.bounce_buf.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
1710 : bdev_io->internal.bounce_buf.orig_md_iov.iov_len);
1711 : }
1712 : }
1713 : }
1714 :
1715 24 : if (spdk_unlikely(rc == -ENOMEM)) {
1716 0 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH_MD);
1717 : } else {
1718 24 : assert(bdev_io->internal.data_transfer_cpl);
1719 24 : bdev_io->internal.f.has_bounce_buf = false;
1720 24 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1721 : }
1722 : }
1723 :
1724 : static inline void
1725 26 : bdev_io_push_bounce_data_done(struct spdk_bdev_io *bdev_io, int rc)
1726 : {
1727 26 : assert(bdev_io->internal.data_transfer_cpl);
1728 26 : if (rc) {
1729 0 : bdev_io->internal.data_transfer_cpl(bdev_io, rc);
1730 0 : return;
1731 : }
1732 :
1733 : /* set original buffer for this io */
1734 26 : bdev_io->u.bdev.iovcnt = bdev_io->internal.bounce_buf.orig_iovcnt;
1735 26 : bdev_io->u.bdev.iovs = bdev_io->internal.bounce_buf.orig_iovs;
1736 :
1737 : /* We don't set bdev_io->internal.f.has_bounce_buf to false here because
1738 : * we still need to clear the md buf */
1739 :
1740 26 : bdev_io_push_bounce_md_buf(bdev_io);
1741 : }
1742 :
1743 : static void
1744 2 : bdev_io_push_bounce_data_done_and_track(void *ctx, int status)
1745 : {
1746 2 : struct spdk_bdev_io *bdev_io = ctx;
1747 2 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1748 :
1749 2 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1750 2 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1751 :
1752 2 : if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
1753 0 : bdev_ch_retry_io(ch);
1754 : }
1755 :
1756 2 : bdev_io_push_bounce_data_done(bdev_io, status);
1757 2 : }
1758 :
1759 : static inline void
1760 27 : bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
1761 : {
1762 27 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
1763 27 : int rc = 0;
1764 :
1765 27 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1766 27 : assert(!bdev_io_use_accel_sequence(bdev_io));
1767 27 : assert(bdev_io->internal.f.has_bounce_buf);
1768 :
1769 : /* if this is read path, copy data from bounce buffer to original buffer */
1770 27 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
1771 11 : if (bdev_io_use_memory_domain(bdev_io)) {
1772 3 : TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
1773 3 : bdev_io_increment_outstanding(ch, ch->shared_resource);
1774 : /* If memory domain is used then we need to call async push function */
1775 3 : rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
1776 : bdev_io->internal.memory_domain_ctx,
1777 : bdev_io->internal.bounce_buf.orig_iovs,
1778 3 : (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
1779 : &bdev_io->internal.bounce_buf.iov, 1,
1780 : bdev_io_push_bounce_data_done_and_track,
1781 : bdev_io);
1782 3 : if (rc == 0) {
1783 : /* Continue IO completion in async callback */
1784 2 : return;
1785 : }
1786 :
1787 1 : TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
1788 1 : bdev_io_decrement_outstanding(ch, ch->shared_resource);
1789 1 : if (rc != -ENOMEM) {
1790 0 : SPDK_ERRLOG("Failed to push data to memory domain %s\n",
1791 : spdk_memory_domain_get_dma_device_id(
1792 : bdev_io->internal.memory_domain));
1793 : }
1794 : } else {
1795 8 : spdk_copy_buf_to_iovs(bdev_io->internal.bounce_buf.orig_iovs,
1796 : bdev_io->internal.bounce_buf.orig_iovcnt,
1797 : bdev_io->internal.bounce_buf.iov.iov_base,
1798 : bdev_io->internal.bounce_buf.iov.iov_len);
1799 : }
1800 : }
1801 :
1802 25 : if (spdk_unlikely(rc == -ENOMEM)) {
1803 1 : bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH);
1804 : } else {
1805 24 : bdev_io_push_bounce_data_done(bdev_io, rc);
1806 : }
1807 : }
1808 :
1809 : static inline void
1810 26 : _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_buffer_cpl cpl_cb)
1811 : {
1812 26 : bdev_io->internal.data_transfer_cpl = cpl_cb;
1813 26 : bdev_io_push_bounce_data(bdev_io);
1814 26 : }
1815 :
1816 : static void
1817 0 : bdev_io_get_iobuf_cb(struct spdk_iobuf_entry *iobuf, void *buf)
1818 : {
1819 : struct spdk_bdev_io *bdev_io;
1820 :
1821 0 : bdev_io = SPDK_CONTAINEROF(iobuf, struct spdk_bdev_io, internal.iobuf);
1822 0 : _bdev_io_set_buf(bdev_io, buf, bdev_io->internal.buf.len);
1823 0 : }
1824 :
1825 : static void
1826 42 : bdev_io_get_buf(struct spdk_bdev_io *bdev_io, uint64_t len)
1827 : {
1828 : struct spdk_bdev_mgmt_channel *mgmt_ch;
1829 : uint64_t max_len;
1830 : void *buf;
1831 :
1832 42 : assert(spdk_bdev_io_get_thread(bdev_io) == spdk_get_thread());
1833 42 : mgmt_ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
1834 42 : max_len = bdev_io_get_max_buf_len(bdev_io, len);
1835 :
1836 42 : if (spdk_unlikely(max_len > mgmt_ch->iobuf.cache[0].large.bufsize)) {
1837 0 : SPDK_ERRLOG("Length %" PRIu64 " is larger than allowed\n", max_len);
1838 0 : bdev_io_get_buf_complete(bdev_io, false);
1839 0 : return;
1840 : }
1841 :
1842 42 : bdev_io->internal.buf.len = len;
1843 42 : buf = spdk_iobuf_get(&mgmt_ch->iobuf, max_len, &bdev_io->internal.iobuf,
1844 : bdev_io_get_iobuf_cb);
1845 42 : if (buf != NULL) {
1846 42 : _bdev_io_set_buf(bdev_io, buf, len);
1847 : }
1848 : }
1849 :
1850 : void
1851 56 : spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
1852 : {
1853 56 : struct spdk_bdev *bdev = bdev_io->bdev;
1854 : uint64_t alignment;
1855 :
1856 56 : assert(cb != NULL);
1857 56 : bdev_io->internal.get_buf_cb = cb;
1858 :
1859 56 : alignment = spdk_bdev_get_buf_align(bdev);
1860 :
1861 96 : if (_is_buf_allocated(bdev_io->u.bdev.iovs) &&
1862 40 : _are_iovs_aligned(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, alignment)) {
1863 : /* Buffer already present and aligned */
1864 18 : cb(spdk_bdev_io_get_io_channel(bdev_io), bdev_io, true);
1865 18 : return;
1866 : }
1867 :
1868 38 : bdev_io_get_buf(bdev_io, len);
1869 : }
1870 :
1871 : static void
1872 4 : _bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
1873 : bool success)
1874 : {
1875 4 : if (!success) {
1876 0 : SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
1877 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
1878 0 : bdev_io_complete_unsubmitted(bdev_io);
1879 0 : return;
1880 : }
1881 :
1882 4 : if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
1883 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
1884 0 : bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
1885 0 : return;
1886 : }
1887 : /* For reads we'll execute the sequence after the data is read, so, for now, only
1888 : * clear out accel_sequence pointer and submit the IO */
1889 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
1890 0 : bdev_io->u.bdev.accel_sequence = NULL;
1891 : }
1892 :
1893 4 : bdev_io_submit(bdev_io);
1894 : }
1895 :
1896 : static void
1897 4 : _bdev_memory_domain_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
1898 : uint64_t len)
1899 : {
1900 4 : assert(cb != NULL);
1901 4 : bdev_io->internal.get_buf_cb = cb;
1902 :
1903 4 : bdev_io_get_buf(bdev_io, len);
1904 4 : }
1905 :
1906 :
1907 3 : SPDK_LOG_DEPRECATION_REGISTER(spdk_bdev_io_get_aux_buf,
1908 : "spdk_bdev_io_get_aux_buf is deprecated", "v25.01", 0);
1909 :
1910 : void
1911 0 : spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
1912 : {
1913 0 : uint64_t len = bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
1914 :
1915 0 : SPDK_LOG_DEPRECATED(spdk_bdev_io_get_aux_buf);
1916 :
1917 0 : assert(cb != NULL);
1918 0 : assert(bdev_io->internal.get_aux_buf_cb == NULL);
1919 0 : bdev_io->internal.get_aux_buf_cb = cb;
1920 0 : bdev_io_get_buf(bdev_io, len);
1921 0 : }
1922 :
1923 : static int
1924 68 : bdev_module_get_max_ctx_size(void)
1925 : {
1926 : struct spdk_bdev_module *bdev_module;
1927 68 : int max_bdev_module_size = 0;
1928 :
1929 266 : TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
1930 198 : if (bdev_module->get_ctx_size && bdev_module->get_ctx_size() > max_bdev_module_size) {
1931 67 : max_bdev_module_size = bdev_module->get_ctx_size();
1932 : }
1933 : }
1934 :
1935 68 : return max_bdev_module_size;
1936 : }
1937 :
1938 : static void
1939 0 : bdev_enable_histogram_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
1940 : {
1941 0 : if (!bdev->internal.histogram_enabled) {
1942 0 : return;
1943 : }
1944 :
1945 0 : spdk_json_write_object_begin(w);
1946 0 : spdk_json_write_named_string(w, "method", "bdev_enable_histogram");
1947 :
1948 0 : spdk_json_write_named_object_begin(w, "params");
1949 0 : spdk_json_write_named_string(w, "name", bdev->name);
1950 :
1951 0 : spdk_json_write_named_bool(w, "enable", bdev->internal.histogram_enabled);
1952 :
1953 0 : if (bdev->internal.histogram_io_type) {
1954 0 : spdk_json_write_named_string(w, "opc",
1955 0 : spdk_bdev_get_io_type_name(bdev->internal.histogram_io_type));
1956 : }
1957 :
1958 0 : spdk_json_write_object_end(w);
1959 :
1960 0 : spdk_json_write_object_end(w);
1961 : }
1962 :
1963 : static void
1964 0 : bdev_qos_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
1965 : {
1966 : int i;
1967 0 : struct spdk_bdev_qos *qos = bdev->internal.qos;
1968 0 : uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES];
1969 :
1970 0 : if (!qos) {
1971 0 : return;
1972 : }
1973 :
1974 0 : spdk_bdev_get_qos_rate_limits(bdev, limits);
1975 :
1976 0 : spdk_json_write_object_begin(w);
1977 0 : spdk_json_write_named_string(w, "method", "bdev_set_qos_limit");
1978 :
1979 0 : spdk_json_write_named_object_begin(w, "params");
1980 0 : spdk_json_write_named_string(w, "name", bdev->name);
1981 0 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1982 0 : if (limits[i] > 0) {
1983 0 : spdk_json_write_named_uint64(w, qos_rpc_type[i], limits[i]);
1984 : }
1985 : }
1986 0 : spdk_json_write_object_end(w);
1987 :
1988 0 : spdk_json_write_object_end(w);
1989 : }
1990 :
1991 : void
1992 0 : spdk_bdev_subsystem_config_json(struct spdk_json_write_ctx *w)
1993 : {
1994 : struct spdk_bdev_module *bdev_module;
1995 : struct spdk_bdev *bdev;
1996 :
1997 0 : assert(w != NULL);
1998 :
1999 0 : spdk_json_write_array_begin(w);
2000 :
2001 0 : spdk_json_write_object_begin(w);
2002 0 : spdk_json_write_named_string(w, "method", "bdev_set_options");
2003 0 : spdk_json_write_named_object_begin(w, "params");
2004 0 : spdk_json_write_named_uint32(w, "bdev_io_pool_size", g_bdev_opts.bdev_io_pool_size);
2005 0 : spdk_json_write_named_uint32(w, "bdev_io_cache_size", g_bdev_opts.bdev_io_cache_size);
2006 0 : spdk_json_write_named_bool(w, "bdev_auto_examine", g_bdev_opts.bdev_auto_examine);
2007 0 : spdk_json_write_named_uint32(w, "iobuf_small_cache_size", g_bdev_opts.iobuf_small_cache_size);
2008 0 : spdk_json_write_named_uint32(w, "iobuf_large_cache_size", g_bdev_opts.iobuf_large_cache_size);
2009 0 : spdk_json_write_object_end(w);
2010 0 : spdk_json_write_object_end(w);
2011 :
2012 0 : bdev_examine_allowlist_config_json(w);
2013 :
2014 0 : TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
2015 0 : if (bdev_module->config_json) {
2016 0 : bdev_module->config_json(w);
2017 : }
2018 : }
2019 :
2020 0 : spdk_spin_lock(&g_bdev_mgr.spinlock);
2021 :
2022 0 : TAILQ_FOREACH(bdev, &g_bdev_mgr.bdevs, internal.link) {
2023 0 : if (bdev->fn_table->write_config_json) {
2024 0 : bdev->fn_table->write_config_json(bdev, w);
2025 : }
2026 :
2027 0 : bdev_qos_config_json(bdev, w);
2028 0 : bdev_enable_histogram_config_json(bdev, w);
2029 : }
2030 :
2031 0 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
2032 :
2033 : /* This has to be last RPC in array to make sure all bdevs finished examine */
2034 0 : spdk_json_write_object_begin(w);
2035 0 : spdk_json_write_named_string(w, "method", "bdev_wait_for_examine");
2036 0 : spdk_json_write_object_end(w);
2037 :
2038 0 : spdk_json_write_array_end(w);
2039 0 : }
2040 :
2041 : static void
2042 72 : bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
2043 : {
2044 72 : struct spdk_bdev_mgmt_channel *ch = ctx_buf;
2045 : struct spdk_bdev_io *bdev_io;
2046 :
2047 72 : spdk_iobuf_channel_fini(&ch->iobuf);
2048 :
2049 10226 : while (!STAILQ_EMPTY(&ch->per_thread_cache)) {
2050 10154 : bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
2051 10154 : STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
2052 10154 : ch->per_thread_cache_count--;
2053 10154 : spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
2054 : }
2055 :
2056 72 : assert(ch->per_thread_cache_count == 0);
2057 72 : }
2058 :
2059 : static int
2060 72 : bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
2061 : {
2062 72 : struct spdk_bdev_mgmt_channel *ch = ctx_buf;
2063 : struct spdk_bdev_io *bdev_io;
2064 : uint32_t i;
2065 : int rc;
2066 :
2067 72 : rc = spdk_iobuf_channel_init(&ch->iobuf, "bdev",
2068 : g_bdev_opts.iobuf_small_cache_size,
2069 : g_bdev_opts.iobuf_large_cache_size);
2070 72 : if (rc != 0) {
2071 0 : SPDK_ERRLOG("Failed to create iobuf channel: %s\n", spdk_strerror(-rc));
2072 0 : return -1;
2073 : }
2074 :
2075 72 : STAILQ_INIT(&ch->per_thread_cache);
2076 72 : ch->bdev_io_cache_size = g_bdev_opts.bdev_io_cache_size;
2077 :
2078 : /* Pre-populate bdev_io cache to ensure this thread cannot be starved. */
2079 72 : ch->per_thread_cache_count = 0;
2080 10226 : for (i = 0; i < ch->bdev_io_cache_size; i++) {
2081 10154 : bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
2082 10154 : if (bdev_io == NULL) {
2083 0 : SPDK_ERRLOG("You need to increase bdev_io_pool_size using bdev_set_options RPC.\n");
2084 0 : assert(false);
2085 : bdev_mgmt_channel_destroy(io_device, ctx_buf);
2086 : return -1;
2087 : }
2088 10154 : ch->per_thread_cache_count++;
2089 10154 : STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
2090 : }
2091 :
2092 72 : TAILQ_INIT(&ch->shared_resources);
2093 72 : TAILQ_INIT(&ch->io_wait_queue);
2094 :
2095 72 : return 0;
2096 : }
2097 :
2098 : static void
2099 68 : bdev_init_complete(int rc)
2100 : {
2101 68 : spdk_bdev_init_cb cb_fn = g_init_cb_fn;
2102 68 : void *cb_arg = g_init_cb_arg;
2103 : struct spdk_bdev_module *m;
2104 :
2105 68 : g_bdev_mgr.init_complete = true;
2106 68 : g_init_cb_fn = NULL;
2107 68 : g_init_cb_arg = NULL;
2108 :
2109 : /*
2110 : * For modules that need to know when subsystem init is complete,
2111 : * inform them now.
2112 : */
2113 68 : if (rc == 0) {
2114 266 : TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
2115 198 : if (m->init_complete) {
2116 24 : m->init_complete();
2117 : }
2118 : }
2119 : }
2120 :
2121 68 : cb_fn(cb_arg, rc);
2122 68 : }
2123 :
2124 : static bool
2125 269 : bdev_module_all_actions_completed(void)
2126 : {
2127 : struct spdk_bdev_module *m;
2128 :
2129 1068 : TAILQ_FOREACH(m, &g_bdev_mgr.bdev_modules, internal.tailq) {
2130 799 : if (m->internal.action_in_progress > 0) {
2131 0 : return false;
2132 : }
2133 : }
2134 269 : return true;
2135 : }
2136 :
2137 : static void
2138 629 : bdev_module_action_complete(void)
2139 : {
2140 : /*
2141 : * Don't finish bdev subsystem initialization if
2142 : * module pre-initialization is still in progress, or
2143 : * the subsystem been already initialized.
2144 : */
2145 629 : if (!g_bdev_mgr.module_init_complete || g_bdev_mgr.init_complete) {
2146 561 : return;
2147 : }
2148 :
2149 : /*
2150 : * Check all bdev modules for inits/examinations in progress. If any
2151 : * exist, return immediately since we cannot finish bdev subsystem
2152 : * initialization until all are completed.
2153 : */
2154 68 : if (!bdev_module_all_actions_completed()) {
2155 0 : return;
2156 : }
2157 :
2158 : /*
2159 : * Modules already finished initialization - now that all
2160 : * the bdev modules have finished their asynchronous I/O
2161 : * processing, the entire bdev layer can be marked as complete.
2162 : */
2163 68 : bdev_init_complete(0);
2164 : }
2165 :
2166 : static void
2167 561 : bdev_module_action_done(struct spdk_bdev_module *module)
2168 : {
2169 561 : spdk_spin_lock(&module->internal.spinlock);
2170 561 : assert(module->internal.action_in_progress > 0);
2171 561 : module->internal.action_in_progress--;
2172 561 : spdk_spin_unlock(&module->internal.spinlock);
2173 561 : bdev_module_action_complete();
2174 561 : }
2175 :
2176 : void
2177 68 : spdk_bdev_module_init_done(struct spdk_bdev_module *module)
2178 : {
2179 68 : assert(module->async_init);
2180 68 : bdev_module_action_done(module);
2181 68 : }
2182 :
2183 : void
2184 493 : spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
2185 : {
2186 493 : bdev_module_action_done(module);
2187 493 : }
2188 :
2189 : /** The last initialized bdev module */
2190 : static struct spdk_bdev_module *g_resume_bdev_module = NULL;
2191 :
2192 : static void
2193 0 : bdev_init_failed(void *cb_arg)
2194 : {
2195 0 : struct spdk_bdev_module *module = cb_arg;
2196 :
2197 0 : spdk_spin_lock(&module->internal.spinlock);
2198 0 : assert(module->internal.action_in_progress > 0);
2199 0 : module->internal.action_in_progress--;
2200 0 : spdk_spin_unlock(&module->internal.spinlock);
2201 0 : bdev_init_complete(-1);
2202 0 : }
2203 :
2204 : static int
2205 68 : bdev_modules_init(void)
2206 : {
2207 : struct spdk_bdev_module *module;
2208 68 : int rc = 0;
2209 :
2210 266 : TAILQ_FOREACH(module, &g_bdev_mgr.bdev_modules, internal.tailq) {
2211 198 : g_resume_bdev_module = module;
2212 198 : if (module->async_init) {
2213 68 : spdk_spin_lock(&module->internal.spinlock);
2214 68 : module->internal.action_in_progress = 1;
2215 68 : spdk_spin_unlock(&module->internal.spinlock);
2216 : }
2217 198 : rc = module->module_init();
2218 198 : if (rc != 0) {
2219 : /* Bump action_in_progress to prevent other modules from completion of modules_init
2220 : * Send message to defer application shutdown until resources are cleaned up */
2221 0 : spdk_spin_lock(&module->internal.spinlock);
2222 0 : module->internal.action_in_progress = 1;
2223 0 : spdk_spin_unlock(&module->internal.spinlock);
2224 0 : spdk_thread_send_msg(spdk_get_thread(), bdev_init_failed, module);
2225 0 : return rc;
2226 : }
2227 : }
2228 :
2229 68 : g_resume_bdev_module = NULL;
2230 68 : return 0;
2231 : }
2232 :
2233 : void
2234 68 : spdk_bdev_initialize(spdk_bdev_init_cb cb_fn, void *cb_arg)
2235 : {
2236 68 : int rc = 0;
2237 68 : char mempool_name[32];
2238 :
2239 68 : assert(cb_fn != NULL);
2240 :
2241 68 : g_init_cb_fn = cb_fn;
2242 68 : g_init_cb_arg = cb_arg;
2243 :
2244 68 : spdk_notify_type_register("bdev_register");
2245 68 : spdk_notify_type_register("bdev_unregister");
2246 :
2247 68 : snprintf(mempool_name, sizeof(mempool_name), "bdev_io_%d", getpid());
2248 :
2249 68 : rc = spdk_iobuf_register_module("bdev");
2250 68 : if (rc != 0) {
2251 0 : SPDK_ERRLOG("could not register bdev iobuf module: %s\n", spdk_strerror(-rc));
2252 0 : bdev_init_complete(-1);
2253 0 : return;
2254 : }
2255 :
2256 136 : g_bdev_mgr.bdev_io_pool = spdk_mempool_create(mempool_name,
2257 68 : g_bdev_opts.bdev_io_pool_size,
2258 : sizeof(struct spdk_bdev_io) +
2259 68 : bdev_module_get_max_ctx_size(),
2260 : 0,
2261 : SPDK_ENV_NUMA_ID_ANY);
2262 :
2263 68 : if (g_bdev_mgr.bdev_io_pool == NULL) {
2264 0 : SPDK_ERRLOG("could not allocate spdk_bdev_io pool\n");
2265 0 : bdev_init_complete(-1);
2266 0 : return;
2267 : }
2268 :
2269 68 : g_bdev_mgr.zero_buffer = spdk_zmalloc(ZERO_BUFFER_SIZE, ZERO_BUFFER_SIZE,
2270 : NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
2271 68 : if (!g_bdev_mgr.zero_buffer) {
2272 0 : SPDK_ERRLOG("create bdev zero buffer failed\n");
2273 0 : bdev_init_complete(-1);
2274 0 : return;
2275 : }
2276 :
2277 : #ifdef SPDK_CONFIG_VTUNE
2278 : g_bdev_mgr.domain = __itt_domain_create("spdk_bdev");
2279 : #endif
2280 :
2281 68 : spdk_io_device_register(&g_bdev_mgr, bdev_mgmt_channel_create,
2282 : bdev_mgmt_channel_destroy,
2283 : sizeof(struct spdk_bdev_mgmt_channel),
2284 : "bdev_mgr");
2285 :
2286 68 : rc = bdev_modules_init();
2287 68 : g_bdev_mgr.module_init_complete = true;
2288 68 : if (rc != 0) {
2289 0 : SPDK_ERRLOG("bdev modules init failed\n");
2290 0 : return;
2291 : }
2292 :
2293 68 : bdev_module_action_complete();
2294 : }
2295 :
2296 : static void
2297 68 : bdev_mgr_unregister_cb(void *io_device)
2298 : {
2299 68 : spdk_bdev_fini_cb cb_fn = g_fini_cb_fn;
2300 :
2301 68 : if (g_bdev_mgr.bdev_io_pool) {
2302 68 : if (spdk_mempool_count(g_bdev_mgr.bdev_io_pool) != g_bdev_opts.bdev_io_pool_size) {
2303 0 : SPDK_ERRLOG("bdev IO pool count is %zu but should be %u\n",
2304 : spdk_mempool_count(g_bdev_mgr.bdev_io_pool),
2305 : g_bdev_opts.bdev_io_pool_size);
2306 : }
2307 :
2308 68 : spdk_mempool_free(g_bdev_mgr.bdev_io_pool);
2309 : }
2310 :
2311 68 : spdk_free(g_bdev_mgr.zero_buffer);
2312 :
2313 68 : bdev_examine_allowlist_free();
2314 :
2315 68 : cb_fn(g_fini_cb_arg);
2316 68 : g_fini_cb_fn = NULL;
2317 68 : g_fini_cb_arg = NULL;
2318 68 : g_bdev_mgr.init_complete = false;
2319 68 : g_bdev_mgr.module_init_complete = false;
2320 68 : }
2321 :
2322 : static void
2323 68 : bdev_module_fini_iter(void *arg)
2324 : {
2325 : struct spdk_bdev_module *bdev_module;
2326 :
2327 : /* FIXME: Handling initialization failures is broken now,
2328 : * so we won't even try cleaning up after successfully
2329 : * initialized modules. if module_init_complete is false,
2330 : * just call spdk_bdev_mgr_unregister_cb
2331 : */
2332 68 : if (!g_bdev_mgr.module_init_complete) {
2333 0 : bdev_mgr_unregister_cb(NULL);
2334 0 : return;
2335 : }
2336 :
2337 : /* Start iterating from the last touched module */
2338 68 : if (!g_resume_bdev_module) {
2339 68 : bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
2340 : } else {
2341 0 : bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list,
2342 : internal.tailq);
2343 : }
2344 :
2345 266 : while (bdev_module) {
2346 198 : if (bdev_module->async_fini) {
2347 : /* Save our place so we can resume later. We must
2348 : * save the variable here, before calling module_fini()
2349 : * below, because in some cases the module may immediately
2350 : * call spdk_bdev_module_fini_done() and re-enter
2351 : * this function to continue iterating. */
2352 0 : g_resume_bdev_module = bdev_module;
2353 : }
2354 :
2355 198 : if (bdev_module->module_fini) {
2356 198 : bdev_module->module_fini();
2357 : }
2358 :
2359 198 : if (bdev_module->async_fini) {
2360 0 : return;
2361 : }
2362 :
2363 198 : bdev_module = TAILQ_PREV(bdev_module, bdev_module_list,
2364 : internal.tailq);
2365 : }
2366 :
2367 68 : g_resume_bdev_module = NULL;
2368 68 : spdk_io_device_unregister(&g_bdev_mgr, bdev_mgr_unregister_cb);
2369 : }
2370 :
2371 : void
2372 0 : spdk_bdev_module_fini_done(void)
2373 : {
2374 0 : if (spdk_get_thread() != g_fini_thread) {
2375 0 : spdk_thread_send_msg(g_fini_thread, bdev_module_fini_iter, NULL);
2376 : } else {
2377 0 : bdev_module_fini_iter(NULL);
2378 : }
2379 0 : }
2380 :
2381 : static void
2382 68 : bdev_finish_unregister_bdevs_iter(void *cb_arg, int bdeverrno)
2383 : {
2384 68 : struct spdk_bdev *bdev = cb_arg;
2385 :
2386 68 : if (bdeverrno && bdev) {
2387 0 : SPDK_WARNLOG("Unable to unregister bdev '%s' during spdk_bdev_finish()\n",
2388 : bdev->name);
2389 :
2390 : /*
2391 : * Since the call to spdk_bdev_unregister() failed, we have no way to free this
2392 : * bdev; try to continue by manually removing this bdev from the list and continue
2393 : * with the next bdev in the list.
2394 : */
2395 0 : TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
2396 : }
2397 :
2398 68 : if (TAILQ_EMPTY(&g_bdev_mgr.bdevs)) {
2399 68 : SPDK_DEBUGLOG(bdev, "Done unregistering bdevs\n");
2400 : /*
2401 : * Bdev module finish need to be deferred as we might be in the middle of some context
2402 : * (like bdev part free) that will use this bdev (or private bdev driver ctx data)
2403 : * after returning.
2404 : */
2405 68 : spdk_thread_send_msg(spdk_get_thread(), bdev_module_fini_iter, NULL);
2406 68 : return;
2407 : }
2408 :
2409 : /*
2410 : * Unregister last unclaimed bdev in the list, to ensure that bdev subsystem
2411 : * shutdown proceeds top-down. The goal is to give virtual bdevs an opportunity
2412 : * to detect clean shutdown as opposed to run-time hot removal of the underlying
2413 : * base bdevs.
2414 : *
2415 : * Also, walk the list in the reverse order.
2416 : */
2417 0 : for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
2418 0 : bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
2419 0 : spdk_spin_lock(&bdev->internal.spinlock);
2420 0 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
2421 0 : LOG_ALREADY_CLAIMED_DEBUG("claimed, skipping", bdev);
2422 0 : spdk_spin_unlock(&bdev->internal.spinlock);
2423 0 : continue;
2424 : }
2425 0 : spdk_spin_unlock(&bdev->internal.spinlock);
2426 :
2427 0 : SPDK_DEBUGLOG(bdev, "Unregistering bdev '%s'\n", bdev->name);
2428 0 : spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
2429 0 : return;
2430 : }
2431 :
2432 : /*
2433 : * If any bdev fails to unclaim underlying bdev properly, we may face the
2434 : * case of bdev list consisting of claimed bdevs only (if claims are managed
2435 : * correctly, this would mean there's a loop in the claims graph which is
2436 : * clearly impossible). Warn and unregister last bdev on the list then.
2437 : */
2438 0 : for (bdev = TAILQ_LAST(&g_bdev_mgr.bdevs, spdk_bdev_list);
2439 0 : bdev; bdev = TAILQ_PREV(bdev, spdk_bdev_list, internal.link)) {
2440 0 : SPDK_WARNLOG("Unregistering claimed bdev '%s'!\n", bdev->name);
2441 0 : spdk_bdev_unregister(bdev, bdev_finish_unregister_bdevs_iter, bdev);
2442 0 : return;
2443 : }
2444 : }
2445 :
2446 : static void
2447 68 : bdev_module_fini_start_iter(void *arg)
2448 : {
2449 : struct spdk_bdev_module *bdev_module;
2450 :
2451 68 : if (!g_resume_bdev_module) {
2452 68 : bdev_module = TAILQ_LAST(&g_bdev_mgr.bdev_modules, bdev_module_list);
2453 : } else {
2454 0 : bdev_module = TAILQ_PREV(g_resume_bdev_module, bdev_module_list, internal.tailq);
2455 : }
2456 :
2457 266 : while (bdev_module) {
2458 198 : if (bdev_module->async_fini_start) {
2459 : /* Save our place so we can resume later. We must
2460 : * save the variable here, before calling fini_start()
2461 : * below, because in some cases the module may immediately
2462 : * call spdk_bdev_module_fini_start_done() and re-enter
2463 : * this function to continue iterating. */
2464 0 : g_resume_bdev_module = bdev_module;
2465 : }
2466 :
2467 198 : if (bdev_module->fini_start) {
2468 24 : bdev_module->fini_start();
2469 : }
2470 :
2471 198 : if (bdev_module->async_fini_start) {
2472 0 : return;
2473 : }
2474 :
2475 198 : bdev_module = TAILQ_PREV(bdev_module, bdev_module_list, internal.tailq);
2476 : }
2477 :
2478 68 : g_resume_bdev_module = NULL;
2479 :
2480 68 : bdev_finish_unregister_bdevs_iter(NULL, 0);
2481 : }
2482 :
2483 : void
2484 0 : spdk_bdev_module_fini_start_done(void)
2485 : {
2486 0 : if (spdk_get_thread() != g_fini_thread) {
2487 0 : spdk_thread_send_msg(g_fini_thread, bdev_module_fini_start_iter, NULL);
2488 : } else {
2489 0 : bdev_module_fini_start_iter(NULL);
2490 : }
2491 0 : }
2492 :
2493 : static void
2494 68 : bdev_finish_wait_for_examine_done(void *cb_arg)
2495 : {
2496 68 : bdev_module_fini_start_iter(NULL);
2497 68 : }
2498 :
2499 : static void bdev_open_async_fini(void);
2500 :
2501 : void
2502 68 : spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
2503 : {
2504 : int rc;
2505 :
2506 68 : assert(cb_fn != NULL);
2507 :
2508 68 : g_fini_thread = spdk_get_thread();
2509 :
2510 68 : g_fini_cb_fn = cb_fn;
2511 68 : g_fini_cb_arg = cb_arg;
2512 :
2513 68 : bdev_open_async_fini();
2514 :
2515 68 : rc = spdk_bdev_wait_for_examine(bdev_finish_wait_for_examine_done, NULL);
2516 68 : if (rc != 0) {
2517 0 : SPDK_ERRLOG("wait_for_examine failed: %s\n", spdk_strerror(-rc));
2518 0 : bdev_finish_wait_for_examine_done(NULL);
2519 : }
2520 68 : }
2521 :
2522 : struct spdk_bdev_io *
2523 699 : bdev_channel_get_io(struct spdk_bdev_channel *channel)
2524 : {
2525 699 : struct spdk_bdev_mgmt_channel *ch = channel->shared_resource->mgmt_ch;
2526 : struct spdk_bdev_io *bdev_io;
2527 :
2528 699 : if (ch->per_thread_cache_count > 0) {
2529 639 : bdev_io = STAILQ_FIRST(&ch->per_thread_cache);
2530 639 : STAILQ_REMOVE_HEAD(&ch->per_thread_cache, internal.buf_link);
2531 639 : ch->per_thread_cache_count--;
2532 60 : } else if (spdk_unlikely(!TAILQ_EMPTY(&ch->io_wait_queue))) {
2533 : /*
2534 : * Don't try to look for bdev_ios in the global pool if there are
2535 : * waiters on bdev_ios - we don't want this caller to jump the line.
2536 : */
2537 0 : bdev_io = NULL;
2538 : } else {
2539 60 : bdev_io = spdk_mempool_get(g_bdev_mgr.bdev_io_pool);
2540 : }
2541 :
2542 699 : return bdev_io;
2543 : }
2544 :
2545 : void
2546 693 : spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
2547 : {
2548 : struct spdk_bdev_mgmt_channel *ch;
2549 :
2550 693 : assert(bdev_io != NULL);
2551 693 : assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING);
2552 :
2553 693 : ch = bdev_io->internal.ch->shared_resource->mgmt_ch;
2554 :
2555 693 : if (bdev_io->internal.f.has_buf) {
2556 16 : bdev_io_put_buf(bdev_io);
2557 : }
2558 :
2559 693 : if (ch->per_thread_cache_count < ch->bdev_io_cache_size) {
2560 639 : ch->per_thread_cache_count++;
2561 639 : STAILQ_INSERT_HEAD(&ch->per_thread_cache, bdev_io, internal.buf_link);
2562 643 : while (ch->per_thread_cache_count > 0 && !TAILQ_EMPTY(&ch->io_wait_queue)) {
2563 : struct spdk_bdev_io_wait_entry *entry;
2564 :
2565 4 : entry = TAILQ_FIRST(&ch->io_wait_queue);
2566 4 : TAILQ_REMOVE(&ch->io_wait_queue, entry, link);
2567 4 : entry->cb_fn(entry->cb_arg);
2568 : }
2569 : } else {
2570 : /* We should never have a full cache with entries on the io wait queue. */
2571 54 : assert(TAILQ_EMPTY(&ch->io_wait_queue));
2572 54 : spdk_mempool_put(g_bdev_mgr.bdev_io_pool, (void *)bdev_io);
2573 : }
2574 693 : }
2575 :
2576 : static bool
2577 72 : bdev_qos_is_iops_rate_limit(enum spdk_bdev_qos_rate_limit_type limit)
2578 : {
2579 72 : assert(limit != SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
2580 :
2581 72 : switch (limit) {
2582 18 : case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2583 18 : return true;
2584 54 : case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2585 : case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2586 : case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2587 54 : return false;
2588 0 : case SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES:
2589 : default:
2590 0 : return false;
2591 : }
2592 : }
2593 :
2594 : static bool
2595 25 : bdev_qos_io_to_limit(struct spdk_bdev_io *bdev_io)
2596 : {
2597 25 : switch (bdev_io->type) {
2598 23 : case SPDK_BDEV_IO_TYPE_NVME_IO:
2599 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2600 : case SPDK_BDEV_IO_TYPE_READ:
2601 : case SPDK_BDEV_IO_TYPE_WRITE:
2602 23 : return true;
2603 0 : case SPDK_BDEV_IO_TYPE_ZCOPY:
2604 0 : if (bdev_io->u.bdev.zcopy.start) {
2605 0 : return true;
2606 : } else {
2607 0 : return false;
2608 : }
2609 2 : default:
2610 2 : return false;
2611 : }
2612 : }
2613 :
2614 : static bool
2615 33 : bdev_is_read_io(struct spdk_bdev_io *bdev_io)
2616 : {
2617 33 : switch (bdev_io->type) {
2618 0 : case SPDK_BDEV_IO_TYPE_NVME_IO:
2619 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2620 : /* Bit 1 (0x2) set for read operation */
2621 0 : if (bdev_io->u.nvme_passthru.cmd.opc & SPDK_NVME_OPC_READ) {
2622 0 : return true;
2623 : } else {
2624 0 : return false;
2625 : }
2626 30 : case SPDK_BDEV_IO_TYPE_READ:
2627 30 : return true;
2628 0 : case SPDK_BDEV_IO_TYPE_ZCOPY:
2629 : /* Populate to read from disk */
2630 0 : if (bdev_io->u.bdev.zcopy.populate) {
2631 0 : return true;
2632 : } else {
2633 0 : return false;
2634 : }
2635 3 : default:
2636 3 : return false;
2637 : }
2638 : }
2639 :
2640 : static uint64_t
2641 43 : bdev_get_io_size_in_byte(struct spdk_bdev_io *bdev_io)
2642 : {
2643 43 : struct spdk_bdev *bdev = bdev_io->bdev;
2644 :
2645 43 : switch (bdev_io->type) {
2646 0 : case SPDK_BDEV_IO_TYPE_NVME_IO:
2647 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
2648 0 : return bdev_io->u.nvme_passthru.nbytes;
2649 43 : case SPDK_BDEV_IO_TYPE_READ:
2650 : case SPDK_BDEV_IO_TYPE_WRITE:
2651 43 : return bdev_io->u.bdev.num_blocks * bdev->blocklen;
2652 0 : case SPDK_BDEV_IO_TYPE_ZCOPY:
2653 : /* Track the data in the start phase only */
2654 0 : if (bdev_io->u.bdev.zcopy.start) {
2655 0 : return bdev_io->u.bdev.num_blocks * bdev->blocklen;
2656 : } else {
2657 0 : return 0;
2658 : }
2659 0 : default:
2660 0 : return 0;
2661 : }
2662 : }
2663 :
2664 : static inline bool
2665 64 : bdev_qos_rw_queue_io(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io, uint64_t delta)
2666 : {
2667 : int64_t remaining_this_timeslice;
2668 :
2669 64 : if (!limit->max_per_timeslice) {
2670 : /* The QoS is disabled */
2671 0 : return false;
2672 : }
2673 :
2674 64 : remaining_this_timeslice = __atomic_sub_fetch(&limit->remaining_this_timeslice, delta,
2675 : __ATOMIC_RELAXED);
2676 64 : if (remaining_this_timeslice + (int64_t)delta > 0) {
2677 : /* There was still a quota for this delta -> the IO shouldn't be queued
2678 : *
2679 : * We allow a slight quota overrun here so an IO bigger than the per-timeslice
2680 : * quota can be allowed once a while. Such overrun then taken into account in
2681 : * the QoS poller, where the next timeslice quota is calculated.
2682 : */
2683 59 : return false;
2684 : }
2685 :
2686 : /* There was no quota for this delta -> the IO should be queued
2687 : * The remaining_this_timeslice must be rewinded so it reflects the real
2688 : * amount of IOs or bytes allowed.
2689 : */
2690 5 : __atomic_add_fetch(
2691 5 : &limit->remaining_this_timeslice, delta, __ATOMIC_RELAXED);
2692 5 : return true;
2693 : }
2694 :
2695 : static inline void
2696 5 : bdev_qos_rw_rewind_io(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io, uint64_t delta)
2697 : {
2698 5 : __atomic_add_fetch(&limit->remaining_this_timeslice, delta, __ATOMIC_RELAXED);
2699 5 : }
2700 :
2701 : static bool
2702 23 : bdev_qos_rw_iops_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2703 : {
2704 23 : return bdev_qos_rw_queue_io(limit, io, 1);
2705 : }
2706 :
2707 : static void
2708 3 : bdev_qos_rw_iops_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2709 : {
2710 3 : bdev_qos_rw_rewind_io(limit, io, 1);
2711 3 : }
2712 :
2713 : static bool
2714 41 : bdev_qos_rw_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2715 : {
2716 41 : return bdev_qos_rw_queue_io(limit, io, bdev_get_io_size_in_byte(io));
2717 : }
2718 :
2719 : static void
2720 2 : bdev_qos_rw_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2721 : {
2722 2 : bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2723 2 : }
2724 :
2725 : static bool
2726 19 : bdev_qos_r_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2727 : {
2728 19 : if (bdev_is_read_io(io) == false) {
2729 1 : return false;
2730 : }
2731 :
2732 18 : return bdev_qos_rw_bps_queue(limit, io);
2733 : }
2734 :
2735 : static void
2736 0 : bdev_qos_r_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2737 : {
2738 0 : if (bdev_is_read_io(io) != false) {
2739 0 : bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2740 : }
2741 0 : }
2742 :
2743 : static bool
2744 14 : bdev_qos_w_bps_queue(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2745 : {
2746 14 : if (bdev_is_read_io(io) == true) {
2747 12 : return false;
2748 : }
2749 :
2750 2 : return bdev_qos_rw_bps_queue(limit, io);
2751 : }
2752 :
2753 : static void
2754 0 : bdev_qos_w_bps_rewind_quota(struct spdk_bdev_qos_limit *limit, struct spdk_bdev_io *io)
2755 : {
2756 0 : if (bdev_is_read_io(io) != true) {
2757 0 : bdev_qos_rw_rewind_io(limit, io, bdev_get_io_size_in_byte(io));
2758 : }
2759 0 : }
2760 :
2761 : static void
2762 10 : bdev_qos_set_ops(struct spdk_bdev_qos *qos)
2763 : {
2764 : int i;
2765 :
2766 50 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2767 40 : if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
2768 15 : qos->rate_limits[i].queue_io = NULL;
2769 15 : continue;
2770 : }
2771 :
2772 25 : switch (i) {
2773 9 : case SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT:
2774 9 : qos->rate_limits[i].queue_io = bdev_qos_rw_iops_queue;
2775 9 : qos->rate_limits[i].rewind_quota = bdev_qos_rw_iops_rewind_quota;
2776 9 : break;
2777 7 : case SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT:
2778 7 : qos->rate_limits[i].queue_io = bdev_qos_rw_bps_queue;
2779 7 : qos->rate_limits[i].rewind_quota = bdev_qos_rw_bps_rewind_quota;
2780 7 : break;
2781 5 : case SPDK_BDEV_QOS_R_BPS_RATE_LIMIT:
2782 5 : qos->rate_limits[i].queue_io = bdev_qos_r_bps_queue;
2783 5 : qos->rate_limits[i].rewind_quota = bdev_qos_r_bps_rewind_quota;
2784 5 : break;
2785 4 : case SPDK_BDEV_QOS_W_BPS_RATE_LIMIT:
2786 4 : qos->rate_limits[i].queue_io = bdev_qos_w_bps_queue;
2787 4 : qos->rate_limits[i].rewind_quota = bdev_qos_w_bps_rewind_quota;
2788 4 : break;
2789 0 : default:
2790 0 : break;
2791 : }
2792 : }
2793 10 : }
2794 :
2795 : static void
2796 6 : _bdev_io_complete_in_submit(struct spdk_bdev_channel *bdev_ch,
2797 : struct spdk_bdev_io *bdev_io,
2798 : enum spdk_bdev_io_status status)
2799 : {
2800 6 : bdev_io->internal.f.in_submit_request = true;
2801 6 : bdev_io_increment_outstanding(bdev_ch, bdev_ch->shared_resource);
2802 6 : spdk_bdev_io_complete(bdev_io, status);
2803 6 : bdev_io->internal.f.in_submit_request = false;
2804 6 : }
2805 :
2806 : static inline void
2807 574 : bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_io)
2808 : {
2809 574 : struct spdk_bdev *bdev = bdev_io->bdev;
2810 574 : struct spdk_io_channel *ch = bdev_ch->channel;
2811 574 : struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
2812 :
2813 574 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
2814 16 : struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
2815 16 : struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;
2816 :
2817 32 : if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
2818 16 : bdev_abort_buf_io(mgmt_channel, bio_to_abort)) {
2819 0 : _bdev_io_complete_in_submit(bdev_ch, bdev_io,
2820 : SPDK_BDEV_IO_STATUS_SUCCESS);
2821 0 : return;
2822 : }
2823 : }
2824 :
2825 574 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE &&
2826 : bdev_io->bdev->split_on_write_unit &&
2827 : bdev_io->u.bdev.num_blocks < bdev_io->bdev->write_unit_size)) {
2828 4 : SPDK_ERRLOG("IO num_blocks %lu does not match the write_unit_size %u\n",
2829 : bdev_io->u.bdev.num_blocks, bdev_io->bdev->write_unit_size);
2830 4 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
2831 4 : return;
2832 : }
2833 :
2834 570 : if (spdk_likely(TAILQ_EMPTY(&shared_resource->nomem_io))) {
2835 527 : bdev_io_increment_outstanding(bdev_ch, shared_resource);
2836 527 : bdev_io->internal.f.in_submit_request = true;
2837 527 : bdev_submit_request(bdev, ch, bdev_io);
2838 527 : bdev_io->internal.f.in_submit_request = false;
2839 : } else {
2840 43 : bdev_queue_nomem_io_tail(shared_resource, bdev_io, BDEV_IO_RETRY_STATE_SUBMIT);
2841 43 : if (shared_resource->nomem_threshold == 0 && shared_resource->io_outstanding == 0) {
2842 : /* Special case when we have nomem IOs and no outstanding IOs which completions
2843 : * could trigger retry of queued IOs */
2844 0 : bdev_shared_ch_retry_io(shared_resource);
2845 : }
2846 : }
2847 : }
2848 :
2849 : static bool
2850 25 : bdev_qos_queue_io(struct spdk_bdev_qos *qos, struct spdk_bdev_io *bdev_io)
2851 : {
2852 : int i;
2853 :
2854 25 : if (bdev_qos_io_to_limit(bdev_io) == true) {
2855 100 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
2856 82 : if (!qos->rate_limits[i].queue_io) {
2857 5 : continue;
2858 : }
2859 :
2860 77 : if (qos->rate_limits[i].queue_io(&qos->rate_limits[i],
2861 : bdev_io) == true) {
2862 10 : for (i -= 1; i >= 0 ; i--) {
2863 5 : if (!qos->rate_limits[i].queue_io) {
2864 0 : continue;
2865 : }
2866 :
2867 5 : qos->rate_limits[i].rewind_quota(&qos->rate_limits[i], bdev_io);
2868 : }
2869 5 : return true;
2870 : }
2871 : }
2872 : }
2873 :
2874 20 : return false;
2875 : }
2876 :
2877 : static int
2878 27 : bdev_qos_io_submit(struct spdk_bdev_channel *ch, struct spdk_bdev_qos *qos)
2879 : {
2880 27 : struct spdk_bdev_io *bdev_io = NULL, *tmp = NULL;
2881 27 : int submitted_ios = 0;
2882 :
2883 52 : TAILQ_FOREACH_SAFE(bdev_io, &ch->qos_queued_io, internal.link, tmp) {
2884 25 : if (!bdev_qos_queue_io(qos, bdev_io)) {
2885 20 : TAILQ_REMOVE(&ch->qos_queued_io, bdev_io, internal.link);
2886 20 : bdev_io_do_submit(ch, bdev_io);
2887 :
2888 20 : submitted_ios++;
2889 : }
2890 : }
2891 :
2892 27 : return submitted_ios;
2893 : }
2894 :
2895 : static void
2896 2 : bdev_queue_io_wait_with_cb(struct spdk_bdev_io *bdev_io, spdk_bdev_io_wait_cb cb_fn)
2897 : {
2898 : int rc;
2899 :
2900 2 : bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
2901 2 : bdev_io->internal.waitq_entry.cb_fn = cb_fn;
2902 2 : bdev_io->internal.waitq_entry.cb_arg = bdev_io;
2903 2 : rc = spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
2904 : &bdev_io->internal.waitq_entry);
2905 2 : if (rc != 0) {
2906 0 : SPDK_ERRLOG("Queue IO failed, rc=%d\n", rc);
2907 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
2908 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
2909 : }
2910 2 : }
2911 :
2912 : static bool
2913 621 : bdev_rw_should_split(struct spdk_bdev_io *bdev_io)
2914 : {
2915 : uint32_t io_boundary;
2916 621 : struct spdk_bdev *bdev = bdev_io->bdev;
2917 621 : uint32_t max_segment_size = bdev->max_segment_size;
2918 621 : uint32_t max_size = bdev->max_rw_size;
2919 621 : int max_segs = bdev->max_num_segments;
2920 :
2921 621 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
2922 24 : io_boundary = bdev->write_unit_size;
2923 597 : } else if (bdev->split_on_optimal_io_boundary) {
2924 168 : io_boundary = bdev->optimal_io_boundary;
2925 : } else {
2926 429 : io_boundary = 0;
2927 : }
2928 :
2929 621 : if (spdk_likely(!io_boundary && !max_segs && !max_segment_size && !max_size)) {
2930 243 : return false;
2931 : }
2932 :
2933 378 : if (io_boundary) {
2934 : uint64_t start_stripe, end_stripe;
2935 :
2936 192 : start_stripe = bdev_io->u.bdev.offset_blocks;
2937 192 : end_stripe = start_stripe + bdev_io->u.bdev.num_blocks - 1;
2938 : /* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
2939 192 : if (spdk_likely(spdk_u32_is_pow2(io_boundary))) {
2940 192 : start_stripe >>= spdk_u32log2(io_boundary);
2941 192 : end_stripe >>= spdk_u32log2(io_boundary);
2942 : } else {
2943 0 : start_stripe /= io_boundary;
2944 0 : end_stripe /= io_boundary;
2945 : }
2946 :
2947 192 : if (start_stripe != end_stripe) {
2948 75 : return true;
2949 : }
2950 : }
2951 :
2952 303 : if (max_segs) {
2953 150 : if (bdev_io->u.bdev.iovcnt > max_segs) {
2954 15 : return true;
2955 : }
2956 : }
2957 :
2958 288 : if (max_segment_size) {
2959 470 : for (int i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
2960 346 : if (bdev_io->u.bdev.iovs[i].iov_len > max_segment_size) {
2961 12 : return true;
2962 : }
2963 : }
2964 : }
2965 :
2966 276 : if (max_size) {
2967 52 : if (bdev_io->u.bdev.num_blocks > max_size) {
2968 7 : return true;
2969 : }
2970 : }
2971 :
2972 269 : return false;
2973 : }
2974 :
2975 : static bool
2976 24 : bdev_unmap_should_split(struct spdk_bdev_io *bdev_io)
2977 : {
2978 : uint32_t num_unmap_segments;
2979 :
2980 24 : if (!bdev_io->bdev->max_unmap || !bdev_io->bdev->max_unmap_segments) {
2981 3 : return false;
2982 : }
2983 21 : num_unmap_segments = spdk_divide_round_up(bdev_io->u.bdev.num_blocks, bdev_io->bdev->max_unmap);
2984 21 : if (num_unmap_segments > bdev_io->bdev->max_unmap_segments) {
2985 4 : return true;
2986 : }
2987 :
2988 17 : return false;
2989 : }
2990 :
2991 : static bool
2992 37 : bdev_write_zeroes_should_split(struct spdk_bdev_io *bdev_io)
2993 : {
2994 37 : if (!bdev_io->bdev->max_write_zeroes) {
2995 4 : return false;
2996 : }
2997 :
2998 33 : if (bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_write_zeroes) {
2999 10 : return true;
3000 : }
3001 :
3002 23 : return false;
3003 : }
3004 :
3005 : static bool
3006 30 : bdev_copy_should_split(struct spdk_bdev_io *bdev_io)
3007 : {
3008 30 : if (bdev_io->bdev->max_copy != 0 &&
3009 25 : bdev_io->u.bdev.num_blocks > bdev_io->bdev->max_copy) {
3010 6 : return true;
3011 : }
3012 :
3013 24 : return false;
3014 : }
3015 :
3016 : static bool
3017 794 : bdev_io_should_split(struct spdk_bdev_io *bdev_io)
3018 : {
3019 794 : switch (bdev_io->type) {
3020 621 : case SPDK_BDEV_IO_TYPE_READ:
3021 : case SPDK_BDEV_IO_TYPE_WRITE:
3022 621 : return bdev_rw_should_split(bdev_io);
3023 24 : case SPDK_BDEV_IO_TYPE_UNMAP:
3024 24 : return bdev_unmap_should_split(bdev_io);
3025 37 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3026 37 : return bdev_write_zeroes_should_split(bdev_io);
3027 30 : case SPDK_BDEV_IO_TYPE_COPY:
3028 30 : return bdev_copy_should_split(bdev_io);
3029 82 : default:
3030 82 : return false;
3031 : }
3032 : }
3033 :
3034 : static uint32_t
3035 249 : _to_next_boundary(uint64_t offset, uint32_t boundary)
3036 : {
3037 249 : return (boundary - (offset % boundary));
3038 : }
3039 :
3040 : static void bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
3041 :
3042 : static void _bdev_rw_split(void *_bdev_io);
3043 :
3044 : static void bdev_unmap_split(struct spdk_bdev_io *bdev_io);
3045 :
3046 : static void
3047 0 : _bdev_unmap_split(void *_bdev_io)
3048 : {
3049 0 : return bdev_unmap_split((struct spdk_bdev_io *)_bdev_io);
3050 : }
3051 :
3052 : static void bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io);
3053 :
3054 : static void
3055 0 : _bdev_write_zeroes_split(void *_bdev_io)
3056 : {
3057 0 : return bdev_write_zeroes_split((struct spdk_bdev_io *)_bdev_io);
3058 : }
3059 :
3060 : static void bdev_copy_split(struct spdk_bdev_io *bdev_io);
3061 :
3062 : static void
3063 0 : _bdev_copy_split(void *_bdev_io)
3064 : {
3065 0 : return bdev_copy_split((struct spdk_bdev_io *)_bdev_io);
3066 : }
3067 :
3068 : static int
3069 305 : bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt, void *md_buf,
3070 : uint64_t num_blocks, uint64_t *offset, uint64_t *remaining)
3071 : {
3072 : int rc;
3073 : uint64_t current_offset, current_remaining, current_src_offset;
3074 : spdk_bdev_io_wait_cb io_wait_fn;
3075 :
3076 305 : current_offset = *offset;
3077 305 : current_remaining = *remaining;
3078 :
3079 305 : assert(bdev_io->internal.f.split);
3080 :
3081 305 : bdev_io->internal.split.outstanding++;
3082 :
3083 305 : io_wait_fn = _bdev_rw_split;
3084 305 : switch (bdev_io->type) {
3085 196 : case SPDK_BDEV_IO_TYPE_READ:
3086 196 : assert(bdev_io->u.bdev.accel_sequence == NULL);
3087 588 : rc = bdev_readv_blocks_with_md(bdev_io->internal.desc,
3088 196 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3089 : iov, iovcnt, md_buf, current_offset,
3090 : num_blocks,
3091 196 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
3092 196 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
3093 : NULL,
3094 : bdev_io->u.bdev.dif_check_flags,
3095 : bdev_io_split_done, bdev_io);
3096 196 : break;
3097 50 : case SPDK_BDEV_IO_TYPE_WRITE:
3098 50 : assert(bdev_io->u.bdev.accel_sequence == NULL);
3099 150 : rc = bdev_writev_blocks_with_md(bdev_io->internal.desc,
3100 50 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3101 : iov, iovcnt, md_buf, current_offset,
3102 : num_blocks,
3103 50 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
3104 50 : bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
3105 : NULL,
3106 : bdev_io->u.bdev.dif_check_flags,
3107 : bdev_io->u.bdev.nvme_cdw12.raw,
3108 : bdev_io->u.bdev.nvme_cdw13.raw,
3109 : bdev_io_split_done, bdev_io);
3110 50 : break;
3111 17 : case SPDK_BDEV_IO_TYPE_UNMAP:
3112 17 : io_wait_fn = _bdev_unmap_split;
3113 17 : rc = spdk_bdev_unmap_blocks(bdev_io->internal.desc,
3114 17 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3115 : current_offset, num_blocks,
3116 : bdev_io_split_done, bdev_io);
3117 17 : break;
3118 23 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3119 23 : io_wait_fn = _bdev_write_zeroes_split;
3120 23 : rc = spdk_bdev_write_zeroes_blocks(bdev_io->internal.desc,
3121 23 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3122 : current_offset, num_blocks,
3123 : bdev_io_split_done, bdev_io);
3124 23 : break;
3125 19 : case SPDK_BDEV_IO_TYPE_COPY:
3126 19 : io_wait_fn = _bdev_copy_split;
3127 19 : current_src_offset = bdev_io->u.bdev.copy.src_offset_blocks +
3128 19 : (current_offset - bdev_io->u.bdev.offset_blocks);
3129 19 : rc = spdk_bdev_copy_blocks(bdev_io->internal.desc,
3130 19 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
3131 : current_offset, current_src_offset, num_blocks,
3132 : bdev_io_split_done, bdev_io);
3133 19 : break;
3134 0 : default:
3135 0 : assert(false);
3136 : rc = -EINVAL;
3137 : break;
3138 : }
3139 :
3140 305 : if (rc == 0) {
3141 301 : current_offset += num_blocks;
3142 301 : current_remaining -= num_blocks;
3143 301 : bdev_io->internal.split.current_offset_blocks = current_offset;
3144 301 : bdev_io->internal.split.remaining_num_blocks = current_remaining;
3145 301 : *offset = current_offset;
3146 301 : *remaining = current_remaining;
3147 : } else {
3148 4 : bdev_io->internal.split.outstanding--;
3149 4 : if (rc == -ENOMEM) {
3150 4 : if (bdev_io->internal.split.outstanding == 0) {
3151 : /* No I/O is outstanding. Hence we should wait here. */
3152 1 : bdev_queue_io_wait_with_cb(bdev_io, io_wait_fn);
3153 : }
3154 : } else {
3155 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3156 0 : if (bdev_io->internal.split.outstanding == 0) {
3157 0 : bdev_ch_remove_from_io_submitted(bdev_io);
3158 0 : spdk_trace_record(TRACE_BDEV_IO_DONE, bdev_io->internal.ch->trace_id,
3159 : 0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx,
3160 : bdev_io->internal.ch->queue_depth);
3161 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3162 : }
3163 : }
3164 : }
3165 :
3166 305 : return rc;
3167 : }
3168 :
3169 : static void
3170 67 : _bdev_rw_split(void *_bdev_io)
3171 : {
3172 : struct iovec *parent_iov, *iov;
3173 67 : struct spdk_bdev_io *bdev_io = _bdev_io;
3174 67 : struct spdk_bdev *bdev = bdev_io->bdev;
3175 67 : uint64_t parent_offset, current_offset, remaining;
3176 : uint32_t parent_iov_offset, parent_iovcnt, parent_iovpos, child_iovcnt;
3177 : uint32_t to_next_boundary, to_next_boundary_bytes, to_last_block_bytes;
3178 : uint32_t iovcnt, iov_len, child_iovsize;
3179 67 : uint32_t blocklen = bdev->blocklen;
3180 : uint32_t io_boundary;
3181 67 : uint32_t max_segment_size = bdev->max_segment_size;
3182 67 : uint32_t max_child_iovcnt = bdev->max_num_segments;
3183 67 : uint32_t max_size = bdev->max_rw_size;
3184 67 : void *md_buf = NULL;
3185 : int rc;
3186 :
3187 67 : max_size = max_size ? max_size : UINT32_MAX;
3188 67 : max_segment_size = max_segment_size ? max_segment_size : UINT32_MAX;
3189 67 : max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, SPDK_BDEV_IO_NUM_CHILD_IOV) :
3190 : SPDK_BDEV_IO_NUM_CHILD_IOV;
3191 :
3192 67 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
3193 5 : io_boundary = bdev->write_unit_size;
3194 62 : } else if (bdev->split_on_optimal_io_boundary) {
3195 40 : io_boundary = bdev->optimal_io_boundary;
3196 : } else {
3197 22 : io_boundary = UINT32_MAX;
3198 : }
3199 :
3200 67 : assert(bdev_io->internal.f.split);
3201 :
3202 67 : remaining = bdev_io->internal.split.remaining_num_blocks;
3203 67 : current_offset = bdev_io->internal.split.current_offset_blocks;
3204 67 : parent_offset = bdev_io->u.bdev.offset_blocks;
3205 67 : parent_iov_offset = (current_offset - parent_offset) * blocklen;
3206 67 : parent_iovcnt = bdev_io->u.bdev.iovcnt;
3207 :
3208 420 : for (parent_iovpos = 0; parent_iovpos < parent_iovcnt; parent_iovpos++) {
3209 420 : parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
3210 420 : if (parent_iov_offset < parent_iov->iov_len) {
3211 67 : break;
3212 : }
3213 353 : parent_iov_offset -= parent_iov->iov_len;
3214 : }
3215 :
3216 67 : child_iovcnt = 0;
3217 309 : while (remaining > 0 && parent_iovpos < parent_iovcnt &&
3218 : child_iovcnt < SPDK_BDEV_IO_NUM_CHILD_IOV) {
3219 249 : to_next_boundary = _to_next_boundary(current_offset, io_boundary);
3220 249 : to_next_boundary = spdk_min(remaining, to_next_boundary);
3221 249 : to_next_boundary = spdk_min(max_size, to_next_boundary);
3222 249 : to_next_boundary_bytes = to_next_boundary * blocklen;
3223 :
3224 249 : iov = &bdev_io->child_iov[child_iovcnt];
3225 249 : iovcnt = 0;
3226 :
3227 249 : if (bdev_io->u.bdev.md_buf) {
3228 48 : md_buf = (char *)bdev_io->u.bdev.md_buf +
3229 24 : (current_offset - parent_offset) * spdk_bdev_get_md_size(bdev);
3230 : }
3231 :
3232 249 : child_iovsize = spdk_min(SPDK_BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
3233 974 : while (to_next_boundary_bytes > 0 && parent_iovpos < parent_iovcnt &&
3234 : iovcnt < child_iovsize) {
3235 725 : parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
3236 725 : iov_len = parent_iov->iov_len - parent_iov_offset;
3237 :
3238 725 : iov_len = spdk_min(iov_len, max_segment_size);
3239 725 : iov_len = spdk_min(iov_len, to_next_boundary_bytes);
3240 725 : to_next_boundary_bytes -= iov_len;
3241 :
3242 725 : bdev_io->child_iov[child_iovcnt].iov_base = parent_iov->iov_base + parent_iov_offset;
3243 725 : bdev_io->child_iov[child_iovcnt].iov_len = iov_len;
3244 :
3245 725 : if (iov_len < parent_iov->iov_len - parent_iov_offset) {
3246 183 : parent_iov_offset += iov_len;
3247 : } else {
3248 542 : parent_iovpos++;
3249 542 : parent_iov_offset = 0;
3250 : }
3251 725 : child_iovcnt++;
3252 725 : iovcnt++;
3253 : }
3254 :
3255 249 : if (to_next_boundary_bytes > 0) {
3256 : /* We had to stop this child I/O early because we ran out of
3257 : * child_iov space or were limited by max_num_segments.
3258 : * Ensure the iovs to be aligned with block size and
3259 : * then adjust to_next_boundary before starting the
3260 : * child I/O.
3261 : */
3262 111 : assert(child_iovcnt == SPDK_BDEV_IO_NUM_CHILD_IOV ||
3263 : iovcnt == child_iovsize);
3264 111 : to_last_block_bytes = to_next_boundary_bytes % blocklen;
3265 111 : if (to_last_block_bytes != 0) {
3266 24 : uint32_t child_iovpos = child_iovcnt - 1;
3267 : /* don't decrease child_iovcnt when it equals to SPDK_BDEV_IO_NUM_CHILD_IOV
3268 : * so the loop will naturally end
3269 : */
3270 :
3271 24 : to_last_block_bytes = blocklen - to_last_block_bytes;
3272 24 : to_next_boundary_bytes += to_last_block_bytes;
3273 53 : while (to_last_block_bytes > 0 && iovcnt > 0) {
3274 32 : iov_len = spdk_min(to_last_block_bytes,
3275 : bdev_io->child_iov[child_iovpos].iov_len);
3276 32 : bdev_io->child_iov[child_iovpos].iov_len -= iov_len;
3277 32 : if (bdev_io->child_iov[child_iovpos].iov_len == 0) {
3278 15 : child_iovpos--;
3279 15 : if (--iovcnt == 0) {
3280 : /* If the child IO is less than a block size just return.
3281 : * If the first child IO of any split round is less than
3282 : * a block size, an error exit.
3283 : */
3284 3 : if (bdev_io->internal.split.outstanding == 0) {
3285 1 : SPDK_ERRLOG("The first child io was less than a block size\n");
3286 1 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3287 1 : bdev_ch_remove_from_io_submitted(bdev_io);
3288 1 : spdk_trace_record(TRACE_BDEV_IO_DONE, bdev_io->internal.ch->trace_id,
3289 : 0, (uintptr_t)bdev_io, bdev_io->internal.caller_ctx,
3290 : bdev_io->internal.ch->queue_depth);
3291 1 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
3292 : }
3293 :
3294 3 : return;
3295 : }
3296 : }
3297 :
3298 29 : to_last_block_bytes -= iov_len;
3299 :
3300 29 : if (parent_iov_offset == 0) {
3301 14 : parent_iovpos--;
3302 14 : parent_iov_offset = bdev_io->u.bdev.iovs[parent_iovpos].iov_len;
3303 : }
3304 29 : parent_iov_offset -= iov_len;
3305 : }
3306 :
3307 21 : assert(to_last_block_bytes == 0);
3308 : }
3309 108 : to_next_boundary -= to_next_boundary_bytes / blocklen;
3310 : }
3311 :
3312 246 : rc = bdev_io_split_submit(bdev_io, iov, iovcnt, md_buf, to_next_boundary,
3313 : ¤t_offset, &remaining);
3314 246 : if (spdk_unlikely(rc)) {
3315 4 : return;
3316 : }
3317 : }
3318 : }
3319 :
3320 : static void
3321 3 : bdev_unmap_split(struct spdk_bdev_io *bdev_io)
3322 : {
3323 3 : uint64_t offset, unmap_blocks, remaining, max_unmap_blocks;
3324 3 : uint32_t num_children_reqs = 0;
3325 : int rc;
3326 :
3327 3 : assert(bdev_io->internal.f.split);
3328 :
3329 3 : offset = bdev_io->internal.split.current_offset_blocks;
3330 3 : remaining = bdev_io->internal.split.remaining_num_blocks;
3331 3 : max_unmap_blocks = bdev_io->bdev->max_unmap * bdev_io->bdev->max_unmap_segments;
3332 :
3333 20 : while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
3334 17 : unmap_blocks = spdk_min(remaining, max_unmap_blocks);
3335 :
3336 17 : rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, unmap_blocks,
3337 : &offset, &remaining);
3338 17 : if (spdk_likely(rc == 0)) {
3339 17 : num_children_reqs++;
3340 : } else {
3341 0 : return;
3342 : }
3343 : }
3344 : }
3345 :
3346 : static void
3347 6 : bdev_write_zeroes_split(struct spdk_bdev_io *bdev_io)
3348 : {
3349 6 : uint64_t offset, write_zeroes_blocks, remaining;
3350 6 : uint32_t num_children_reqs = 0;
3351 : int rc;
3352 :
3353 6 : assert(bdev_io->internal.f.split);
3354 :
3355 6 : offset = bdev_io->internal.split.current_offset_blocks;
3356 6 : remaining = bdev_io->internal.split.remaining_num_blocks;
3357 :
3358 29 : while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS)) {
3359 23 : write_zeroes_blocks = spdk_min(remaining, bdev_io->bdev->max_write_zeroes);
3360 :
3361 23 : rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, write_zeroes_blocks,
3362 : &offset, &remaining);
3363 23 : if (spdk_likely(rc == 0)) {
3364 23 : num_children_reqs++;
3365 : } else {
3366 0 : return;
3367 : }
3368 : }
3369 : }
3370 :
3371 : static void
3372 4 : bdev_copy_split(struct spdk_bdev_io *bdev_io)
3373 : {
3374 4 : uint64_t offset, copy_blocks, remaining;
3375 4 : uint32_t num_children_reqs = 0;
3376 : int rc;
3377 :
3378 4 : assert(bdev_io->internal.f.split);
3379 :
3380 4 : offset = bdev_io->internal.split.current_offset_blocks;
3381 4 : remaining = bdev_io->internal.split.remaining_num_blocks;
3382 :
3383 4 : assert(bdev_io->bdev->max_copy != 0);
3384 23 : while (remaining && (num_children_reqs < SPDK_BDEV_MAX_CHILDREN_COPY_REQS)) {
3385 19 : copy_blocks = spdk_min(remaining, bdev_io->bdev->max_copy);
3386 :
3387 19 : rc = bdev_io_split_submit(bdev_io, NULL, 0, NULL, copy_blocks,
3388 : &offset, &remaining);
3389 19 : if (spdk_likely(rc == 0)) {
3390 19 : num_children_reqs++;
3391 : } else {
3392 0 : return;
3393 : }
3394 : }
3395 : }
3396 :
3397 : static void
3398 58 : parent_bdev_io_complete(void *ctx, int rc)
3399 : {
3400 58 : struct spdk_bdev_io *parent_io = ctx;
3401 :
3402 58 : if (rc) {
3403 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3404 : }
3405 :
3406 58 : parent_io->internal.cb(parent_io, parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
3407 : parent_io->internal.caller_ctx);
3408 58 : }
3409 :
3410 : static void
3411 0 : bdev_io_complete_parent_sequence_cb(void *ctx, int status)
3412 : {
3413 0 : struct spdk_bdev_io *bdev_io = ctx;
3414 :
3415 : /* u.bdev.accel_sequence should have already been cleared at this point */
3416 0 : assert(bdev_io->u.bdev.accel_sequence == NULL);
3417 0 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
3418 0 : bdev_io->internal.f.has_accel_sequence = false;
3419 :
3420 0 : if (spdk_unlikely(status != 0)) {
3421 0 : SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
3422 : }
3423 :
3424 0 : parent_bdev_io_complete(bdev_io, status);
3425 0 : }
3426 :
3427 : static void
3428 301 : bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
3429 : {
3430 301 : struct spdk_bdev_io *parent_io = cb_arg;
3431 :
3432 301 : spdk_bdev_free_io(bdev_io);
3433 :
3434 301 : assert(parent_io->internal.f.split);
3435 :
3436 301 : if (!success) {
3437 21 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
3438 : /* If any child I/O failed, stop further splitting process. */
3439 21 : parent_io->internal.split.current_offset_blocks += parent_io->internal.split.remaining_num_blocks;
3440 21 : parent_io->internal.split.remaining_num_blocks = 0;
3441 : }
3442 301 : parent_io->internal.split.outstanding--;
3443 301 : if (parent_io->internal.split.outstanding != 0) {
3444 223 : return;
3445 : }
3446 :
3447 : /*
3448 : * Parent I/O finishes when all blocks are consumed.
3449 : */
3450 78 : if (parent_io->internal.split.remaining_num_blocks == 0) {
3451 58 : assert(parent_io->internal.cb != bdev_io_split_done);
3452 58 : bdev_ch_remove_from_io_submitted(parent_io);
3453 58 : spdk_trace_record(TRACE_BDEV_IO_DONE, parent_io->internal.ch->trace_id,
3454 : 0, (uintptr_t)parent_io, bdev_io->internal.caller_ctx,
3455 : parent_io->internal.ch->queue_depth);
3456 :
3457 58 : if (spdk_likely(parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
3458 48 : if (bdev_io_needs_sequence_exec(parent_io->internal.desc, parent_io)) {
3459 0 : bdev_io_exec_sequence(parent_io, bdev_io_complete_parent_sequence_cb);
3460 0 : return;
3461 48 : } else if (parent_io->internal.f.has_bounce_buf &&
3462 0 : !bdev_io_use_accel_sequence(bdev_io)) {
3463 : /* bdev IO will be completed in the callback */
3464 0 : _bdev_io_push_bounce_data_buffer(parent_io, parent_bdev_io_complete);
3465 0 : return;
3466 : }
3467 : }
3468 :
3469 58 : parent_bdev_io_complete(parent_io, 0);
3470 58 : return;
3471 : }
3472 :
3473 : /*
3474 : * Continue with the splitting process. This function will complete the parent I/O if the
3475 : * splitting is done.
3476 : */
3477 20 : switch (parent_io->type) {
3478 17 : case SPDK_BDEV_IO_TYPE_READ:
3479 : case SPDK_BDEV_IO_TYPE_WRITE:
3480 17 : _bdev_rw_split(parent_io);
3481 17 : break;
3482 1 : case SPDK_BDEV_IO_TYPE_UNMAP:
3483 1 : bdev_unmap_split(parent_io);
3484 1 : break;
3485 1 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3486 1 : bdev_write_zeroes_split(parent_io);
3487 1 : break;
3488 1 : case SPDK_BDEV_IO_TYPE_COPY:
3489 1 : bdev_copy_split(parent_io);
3490 1 : break;
3491 0 : default:
3492 0 : assert(false);
3493 : break;
3494 : }
3495 : }
3496 :
3497 : static void bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
3498 : bool success);
3499 :
3500 : static void
3501 59 : bdev_io_split(struct spdk_bdev_io *bdev_io)
3502 : {
3503 59 : assert(bdev_io_should_split(bdev_io));
3504 59 : assert(bdev_io->internal.f.split);
3505 :
3506 59 : bdev_io->internal.split.current_offset_blocks = bdev_io->u.bdev.offset_blocks;
3507 59 : bdev_io->internal.split.remaining_num_blocks = bdev_io->u.bdev.num_blocks;
3508 59 : bdev_io->internal.split.outstanding = 0;
3509 59 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
3510 :
3511 59 : switch (bdev_io->type) {
3512 49 : case SPDK_BDEV_IO_TYPE_READ:
3513 : case SPDK_BDEV_IO_TYPE_WRITE:
3514 49 : if (_is_buf_allocated(bdev_io->u.bdev.iovs)) {
3515 49 : _bdev_rw_split(bdev_io);
3516 : } else {
3517 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3518 0 : spdk_bdev_io_get_buf(bdev_io, bdev_rw_split_get_buf_cb,
3519 0 : bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
3520 : }
3521 49 : break;
3522 2 : case SPDK_BDEV_IO_TYPE_UNMAP:
3523 2 : bdev_unmap_split(bdev_io);
3524 2 : break;
3525 5 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3526 5 : bdev_write_zeroes_split(bdev_io);
3527 5 : break;
3528 3 : case SPDK_BDEV_IO_TYPE_COPY:
3529 3 : bdev_copy_split(bdev_io);
3530 3 : break;
3531 0 : default:
3532 0 : assert(false);
3533 : break;
3534 : }
3535 59 : }
3536 :
3537 : static void
3538 0 : bdev_rw_split_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
3539 : {
3540 0 : if (!success) {
3541 0 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
3542 0 : return;
3543 : }
3544 :
3545 0 : _bdev_rw_split(bdev_io);
3546 : }
3547 :
3548 : static inline void
3549 579 : _bdev_io_submit(struct spdk_bdev_io *bdev_io)
3550 : {
3551 579 : struct spdk_bdev *bdev = bdev_io->bdev;
3552 579 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
3553 :
3554 579 : if (spdk_likely(bdev_ch->flags == 0)) {
3555 554 : bdev_io_do_submit(bdev_ch, bdev_io);
3556 554 : return;
3557 : }
3558 :
3559 25 : if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
3560 2 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
3561 23 : } else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
3562 25 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) &&
3563 2 : bdev_abort_queued_io(&bdev_ch->qos_queued_io, bdev_io->u.abort.bio_to_abort)) {
3564 0 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
3565 : } else {
3566 23 : TAILQ_INSERT_TAIL(&bdev_ch->qos_queued_io, bdev_io, internal.link);
3567 23 : bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
3568 : }
3569 : } else {
3570 0 : SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
3571 0 : _bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
3572 : }
3573 : }
3574 :
3575 : bool bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2);
3576 :
3577 : bool
3578 23 : bdev_lba_range_overlapped(struct lba_range *range1, struct lba_range *range2)
3579 : {
3580 23 : if (range1->length == 0 || range2->length == 0) {
3581 1 : return false;
3582 : }
3583 :
3584 22 : if (range1->offset + range1->length <= range2->offset) {
3585 1 : return false;
3586 : }
3587 :
3588 21 : if (range2->offset + range2->length <= range1->offset) {
3589 3 : return false;
3590 : }
3591 :
3592 18 : return true;
3593 : }
3594 :
3595 : static bool
3596 11 : bdev_io_range_is_locked(struct spdk_bdev_io *bdev_io, struct lba_range *range)
3597 : {
3598 11 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3599 11 : struct lba_range r;
3600 :
3601 11 : switch (bdev_io->type) {
3602 0 : case SPDK_BDEV_IO_TYPE_NVME_IO:
3603 : case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
3604 : /* Don't try to decode the NVMe command - just assume worst-case and that
3605 : * it overlaps a locked range.
3606 : */
3607 0 : return true;
3608 6 : case SPDK_BDEV_IO_TYPE_READ:
3609 6 : if (!range->quiesce) {
3610 4 : return false;
3611 : }
3612 : /* fallthrough */
3613 : case SPDK_BDEV_IO_TYPE_WRITE:
3614 : case SPDK_BDEV_IO_TYPE_UNMAP:
3615 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3616 : case SPDK_BDEV_IO_TYPE_ZCOPY:
3617 : case SPDK_BDEV_IO_TYPE_COPY:
3618 7 : r.offset = bdev_io->u.bdev.offset_blocks;
3619 7 : r.length = bdev_io->u.bdev.num_blocks;
3620 7 : if (!bdev_lba_range_overlapped(range, &r)) {
3621 : /* This I/O doesn't overlap the specified LBA range. */
3622 0 : return false;
3623 7 : } else if (range->owner_ch == ch && range->locked_ctx == bdev_io->internal.caller_ctx) {
3624 : /* This I/O overlaps, but the I/O is on the same channel that locked this
3625 : * range, and the caller_ctx is the same as the locked_ctx. This means
3626 : * that this I/O is associated with the lock, and is allowed to execute.
3627 : */
3628 2 : return false;
3629 : } else {
3630 5 : return true;
3631 : }
3632 0 : default:
3633 0 : return false;
3634 : }
3635 : }
3636 :
3637 : void
3638 639 : bdev_io_submit(struct spdk_bdev_io *bdev_io)
3639 : {
3640 639 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3641 :
3642 639 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
3643 :
3644 639 : if (!TAILQ_EMPTY(&ch->locked_ranges)) {
3645 : struct lba_range *range;
3646 :
3647 13 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
3648 8 : if (bdev_io_range_is_locked(bdev_io, range)) {
3649 3 : TAILQ_INSERT_TAIL(&ch->io_locked, bdev_io, internal.ch_link);
3650 3 : return;
3651 : }
3652 : }
3653 : }
3654 :
3655 636 : bdev_ch_add_to_io_submitted(bdev_io);
3656 :
3657 636 : bdev_io->internal.submit_tsc = spdk_get_ticks();
3658 636 : spdk_trace_record_tsc(bdev_io->internal.submit_tsc, TRACE_BDEV_IO_START,
3659 : ch->trace_id, bdev_io->u.bdev.num_blocks,
3660 : (uintptr_t)bdev_io, (uint64_t)bdev_io->type, bdev_io->internal.caller_ctx,
3661 : bdev_io->u.bdev.offset_blocks, ch->queue_depth);
3662 :
3663 636 : if (bdev_io->internal.f.split) {
3664 59 : bdev_io_split(bdev_io);
3665 59 : return;
3666 : }
3667 :
3668 577 : _bdev_io_submit(bdev_io);
3669 : }
3670 :
3671 : static inline void
3672 4 : _bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
3673 : {
3674 : /* bdev doesn't support memory domains, thereby buffers in this IO request can't
3675 : * be accessed directly. It is needed to allocate buffers before issuing IO operation.
3676 : * For write operation we need to pull buffers from memory domain before submitting IO.
3677 : * Once read operation completes, we need to use memory_domain push functionality to
3678 : * update data in original memory domain IO buffer
3679 : * This IO request will go through a regular IO flow, so clear memory domains pointers */
3680 4 : assert(bdev_io->internal.f.has_memory_domain);
3681 4 : bdev_io->u.bdev.memory_domain = NULL;
3682 4 : bdev_io->u.bdev.memory_domain_ctx = NULL;
3683 4 : _bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb,
3684 4 : bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
3685 4 : }
3686 :
3687 : static inline void
3688 292 : _bdev_io_submit_ext(struct spdk_bdev_desc *desc, struct spdk_bdev_io *bdev_io)
3689 : {
3690 292 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
3691 292 : bool needs_exec = bdev_io_needs_sequence_exec(desc, bdev_io);
3692 :
3693 292 : if (spdk_unlikely(ch->flags & BDEV_CH_RESET_IN_PROGRESS)) {
3694 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
3695 0 : bdev_io_complete_unsubmitted(bdev_io);
3696 0 : return;
3697 : }
3698 :
3699 : /* We need to allocate bounce buffer if bdev doesn't support memory domains, or if it does
3700 : * support them, but we need to execute an accel sequence and the data buffer is from accel
3701 : * memory domain (to avoid doing a push/pull from that domain).
3702 : */
3703 292 : if (bdev_io_use_memory_domain(bdev_io)) {
3704 4 : if (!desc->memory_domains_supported ||
3705 0 : (needs_exec && bdev_io->internal.memory_domain == spdk_accel_get_memory_domain())) {
3706 4 : _bdev_io_ext_use_bounce_buffer(bdev_io);
3707 4 : return;
3708 : }
3709 : }
3710 :
3711 288 : if (needs_exec) {
3712 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
3713 0 : bdev_io_exec_sequence(bdev_io, bdev_io_submit_sequence_cb);
3714 0 : return;
3715 : }
3716 : /* For reads we'll execute the sequence after the data is read, so, for now, only
3717 : * clear out accel_sequence pointer and submit the IO */
3718 0 : assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
3719 0 : bdev_io->u.bdev.accel_sequence = NULL;
3720 : }
3721 :
3722 288 : bdev_io_submit(bdev_io);
3723 : }
3724 :
3725 : static void
3726 12 : bdev_io_submit_reset(struct spdk_bdev_io *bdev_io)
3727 : {
3728 12 : struct spdk_bdev *bdev = bdev_io->bdev;
3729 12 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
3730 12 : struct spdk_io_channel *ch = bdev_ch->channel;
3731 :
3732 12 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
3733 :
3734 12 : bdev_io->internal.f.in_submit_request = true;
3735 12 : bdev_submit_request(bdev, ch, bdev_io);
3736 12 : bdev_io->internal.f.in_submit_request = false;
3737 12 : }
3738 :
3739 : void
3740 693 : bdev_io_init(struct spdk_bdev_io *bdev_io,
3741 : struct spdk_bdev *bdev, void *cb_arg,
3742 : spdk_bdev_io_completion_cb cb)
3743 : {
3744 693 : bdev_io->bdev = bdev;
3745 693 : bdev_io->internal.f.raw = 0;
3746 693 : bdev_io->internal.caller_ctx = cb_arg;
3747 693 : bdev_io->internal.cb = cb;
3748 693 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
3749 693 : bdev_io->internal.f.in_submit_request = false;
3750 693 : bdev_io->internal.error.nvme.cdw0 = 0;
3751 693 : bdev_io->num_retries = 0;
3752 693 : bdev_io->internal.get_buf_cb = NULL;
3753 693 : bdev_io->internal.get_aux_buf_cb = NULL;
3754 693 : bdev_io->internal.data_transfer_cpl = NULL;
3755 693 : bdev_io->internal.f.split = bdev_io_should_split(bdev_io);
3756 693 : }
3757 :
3758 : static bool
3759 534 : bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
3760 : {
3761 534 : return bdev->fn_table->io_type_supported(bdev->ctxt, io_type);
3762 : }
3763 :
3764 : bool
3765 176 : spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
3766 : {
3767 : bool supported;
3768 :
3769 176 : supported = bdev_io_type_supported(bdev, io_type);
3770 :
3771 176 : if (!supported) {
3772 7 : switch (io_type) {
3773 0 : case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
3774 : /* The bdev layer will emulate write zeroes as long as write is supported. */
3775 0 : supported = bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE);
3776 0 : break;
3777 7 : default:
3778 7 : break;
3779 : }
3780 : }
3781 :
3782 176 : return supported;
3783 : }
3784 :
3785 : static const char *g_io_type_strings[] = {
3786 : [SPDK_BDEV_IO_TYPE_READ] = "read",
3787 : [SPDK_BDEV_IO_TYPE_WRITE] = "write",
3788 : [SPDK_BDEV_IO_TYPE_UNMAP] = "unmap",
3789 : [SPDK_BDEV_IO_TYPE_FLUSH] = "flush",
3790 : [SPDK_BDEV_IO_TYPE_RESET] = "reset",
3791 : [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = "nvme_admin",
3792 : [SPDK_BDEV_IO_TYPE_NVME_IO] = "nvme_io",
3793 : [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = "nvme_io_md",
3794 : [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = "write_zeroes",
3795 : [SPDK_BDEV_IO_TYPE_ZCOPY] = "zcopy",
3796 : [SPDK_BDEV_IO_TYPE_GET_ZONE_INFO] = "get_zone_info",
3797 : [SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT] = "zone_management",
3798 : [SPDK_BDEV_IO_TYPE_ZONE_APPEND] = "zone_append",
3799 : [SPDK_BDEV_IO_TYPE_COMPARE] = "compare",
3800 : [SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE] = "compare_and_write",
3801 : [SPDK_BDEV_IO_TYPE_ABORT] = "abort",
3802 : [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = "seek_hole",
3803 : [SPDK_BDEV_IO_TYPE_SEEK_DATA] = "seek_data",
3804 : [SPDK_BDEV_IO_TYPE_COPY] = "copy",
3805 : [SPDK_BDEV_IO_TYPE_NVME_IOV_MD] = "nvme_iov_md",
3806 : };
3807 :
3808 : const char *
3809 0 : spdk_bdev_get_io_type_name(enum spdk_bdev_io_type io_type)
3810 : {
3811 0 : if (io_type <= SPDK_BDEV_IO_TYPE_INVALID || io_type >= SPDK_BDEV_NUM_IO_TYPES) {
3812 0 : return NULL;
3813 : }
3814 :
3815 0 : return g_io_type_strings[io_type];
3816 : }
3817 :
3818 : int
3819 0 : spdk_bdev_get_io_type(const char *io_type_string)
3820 : {
3821 : int i;
3822 :
3823 0 : for (i = SPDK_BDEV_IO_TYPE_READ; i < SPDK_BDEV_NUM_IO_TYPES; ++i) {
3824 0 : if (!strcmp(io_type_string, g_io_type_strings[i])) {
3825 0 : return i;
3826 : }
3827 : }
3828 :
3829 0 : return -1;
3830 : }
3831 :
3832 : uint64_t
3833 0 : spdk_bdev_io_get_submit_tsc(struct spdk_bdev_io *bdev_io)
3834 : {
3835 0 : return bdev_io->internal.submit_tsc;
3836 : }
3837 :
3838 : int
3839 0 : spdk_bdev_dump_info_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
3840 : {
3841 0 : if (bdev->fn_table->dump_info_json) {
3842 0 : return bdev->fn_table->dump_info_json(bdev->ctxt, w);
3843 : }
3844 :
3845 0 : return 0;
3846 : }
3847 :
3848 : static void
3849 10 : bdev_qos_update_max_quota_per_timeslice(struct spdk_bdev_qos *qos)
3850 : {
3851 10 : uint32_t max_per_timeslice = 0;
3852 : int i;
3853 :
3854 50 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3855 40 : if (qos->rate_limits[i].limit == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
3856 15 : qos->rate_limits[i].max_per_timeslice = 0;
3857 15 : continue;
3858 : }
3859 :
3860 25 : max_per_timeslice = qos->rate_limits[i].limit *
3861 25 : SPDK_BDEV_QOS_TIMESLICE_IN_USEC / SPDK_SEC_TO_USEC;
3862 :
3863 25 : qos->rate_limits[i].max_per_timeslice = spdk_max(max_per_timeslice,
3864 : qos->rate_limits[i].min_per_timeslice);
3865 :
3866 25 : __atomic_store_n(&qos->rate_limits[i].remaining_this_timeslice,
3867 25 : qos->rate_limits[i].max_per_timeslice, __ATOMIC_RELEASE);
3868 : }
3869 :
3870 10 : bdev_qos_set_ops(qos);
3871 10 : }
3872 :
3873 : static void
3874 4 : bdev_channel_submit_qos_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
3875 : struct spdk_io_channel *io_ch, void *ctx)
3876 : {
3877 4 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
3878 : int status;
3879 :
3880 4 : bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
3881 :
3882 : /* if all IOs were sent then continue the iteration, otherwise - stop it */
3883 : /* TODO: channels round robing */
3884 4 : status = TAILQ_EMPTY(&bdev_ch->qos_queued_io) ? 0 : 1;
3885 :
3886 4 : spdk_bdev_for_each_channel_continue(i, status);
3887 4 : }
3888 :
3889 :
3890 : static void
3891 2 : bdev_channel_submit_qos_io_done(struct spdk_bdev *bdev, void *ctx, int status)
3892 : {
3893 :
3894 2 : }
3895 :
3896 : static int
3897 3 : bdev_channel_poll_qos(void *arg)
3898 : {
3899 3 : struct spdk_bdev *bdev = arg;
3900 3 : struct spdk_bdev_qos *qos = bdev->internal.qos;
3901 3 : uint64_t now = spdk_get_ticks();
3902 : int i;
3903 : int64_t remaining_last_timeslice;
3904 :
3905 3 : if (spdk_unlikely(qos->thread == NULL)) {
3906 : /* Old QoS was unbound to remove and new QoS is not enabled yet. */
3907 1 : return SPDK_POLLER_IDLE;
3908 : }
3909 :
3910 2 : if (now < (qos->last_timeslice + qos->timeslice_size)) {
3911 : /* We received our callback earlier than expected - return
3912 : * immediately and wait to do accounting until at least one
3913 : * timeslice has actually expired. This should never happen
3914 : * with a well-behaved timer implementation.
3915 : */
3916 0 : return SPDK_POLLER_IDLE;
3917 : }
3918 :
3919 : /* Reset for next round of rate limiting */
3920 10 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3921 : /* We may have allowed the IOs or bytes to slightly overrun in the last
3922 : * timeslice. remaining_this_timeslice is signed, so if it's negative
3923 : * here, we'll account for the overrun so that the next timeslice will
3924 : * be appropriately reduced.
3925 : */
3926 8 : remaining_last_timeslice = __atomic_exchange_n(&qos->rate_limits[i].remaining_this_timeslice,
3927 : 0, __ATOMIC_RELAXED);
3928 8 : if (remaining_last_timeslice < 0) {
3929 : /* There could be a race condition here as both bdev_qos_rw_queue_io() and bdev_channel_poll_qos()
3930 : * potentially use 2 atomic ops each, so they can intertwine.
3931 : * This race can potentially cause the limits to be a little fuzzy but won't cause any real damage.
3932 : */
3933 0 : __atomic_store_n(&qos->rate_limits[i].remaining_this_timeslice,
3934 : remaining_last_timeslice, __ATOMIC_RELAXED);
3935 : }
3936 : }
3937 :
3938 4 : while (now >= (qos->last_timeslice + qos->timeslice_size)) {
3939 2 : qos->last_timeslice += qos->timeslice_size;
3940 10 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
3941 8 : __atomic_add_fetch(&qos->rate_limits[i].remaining_this_timeslice,
3942 8 : qos->rate_limits[i].max_per_timeslice, __ATOMIC_RELAXED);
3943 : }
3944 : }
3945 :
3946 2 : spdk_bdev_for_each_channel(bdev, bdev_channel_submit_qos_io, qos,
3947 : bdev_channel_submit_qos_io_done);
3948 :
3949 2 : return SPDK_POLLER_BUSY;
3950 : }
3951 :
3952 : static void
3953 75 : bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
3954 : {
3955 : struct spdk_bdev_shared_resource *shared_resource;
3956 : struct lba_range *range;
3957 :
3958 75 : bdev_free_io_stat(ch->stat);
3959 : #ifdef SPDK_CONFIG_VTUNE
3960 : bdev_free_io_stat(ch->prev_stat);
3961 : #endif
3962 :
3963 75 : while (!TAILQ_EMPTY(&ch->locked_ranges)) {
3964 0 : range = TAILQ_FIRST(&ch->locked_ranges);
3965 0 : TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
3966 0 : free(range);
3967 : }
3968 :
3969 75 : spdk_put_io_channel(ch->channel);
3970 75 : spdk_put_io_channel(ch->accel_channel);
3971 :
3972 75 : shared_resource = ch->shared_resource;
3973 :
3974 75 : assert(TAILQ_EMPTY(&ch->io_locked));
3975 75 : assert(TAILQ_EMPTY(&ch->io_submitted));
3976 75 : assert(TAILQ_EMPTY(&ch->io_accel_exec));
3977 75 : assert(TAILQ_EMPTY(&ch->io_memory_domain));
3978 75 : assert(ch->io_outstanding == 0);
3979 75 : assert(shared_resource->ref > 0);
3980 75 : shared_resource->ref--;
3981 75 : if (shared_resource->ref == 0) {
3982 74 : assert(shared_resource->io_outstanding == 0);
3983 74 : TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
3984 74 : spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
3985 74 : spdk_poller_unregister(&shared_resource->nomem_poller);
3986 74 : free(shared_resource);
3987 : }
3988 75 : }
3989 :
3990 : static void
3991 84 : bdev_enable_qos(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch)
3992 : {
3993 84 : struct spdk_bdev_qos *qos = bdev->internal.qos;
3994 : int i;
3995 :
3996 84 : assert(spdk_spin_held(&bdev->internal.spinlock));
3997 :
3998 : /* Rate limiting on this bdev enabled */
3999 84 : if (qos) {
4000 17 : if (qos->ch == NULL) {
4001 : struct spdk_io_channel *io_ch;
4002 :
4003 9 : SPDK_DEBUGLOG(bdev, "Selecting channel %p as QoS channel for bdev %s on thread %p\n", ch,
4004 : bdev->name, spdk_get_thread());
4005 :
4006 : /* No qos channel has been selected, so set one up */
4007 :
4008 : /* Take another reference to ch */
4009 9 : io_ch = spdk_get_io_channel(__bdev_to_io_dev(bdev));
4010 9 : assert(io_ch != NULL);
4011 9 : qos->ch = ch;
4012 :
4013 9 : qos->thread = spdk_io_channel_get_thread(io_ch);
4014 :
4015 45 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4016 36 : if (bdev_qos_is_iops_rate_limit(i) == true) {
4017 9 : qos->rate_limits[i].min_per_timeslice =
4018 : SPDK_BDEV_QOS_MIN_IO_PER_TIMESLICE;
4019 : } else {
4020 27 : qos->rate_limits[i].min_per_timeslice =
4021 : SPDK_BDEV_QOS_MIN_BYTE_PER_TIMESLICE;
4022 : }
4023 :
4024 36 : if (qos->rate_limits[i].limit == 0) {
4025 2 : qos->rate_limits[i].limit = SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
4026 : }
4027 : }
4028 9 : bdev_qos_update_max_quota_per_timeslice(qos);
4029 9 : qos->timeslice_size =
4030 9 : SPDK_BDEV_QOS_TIMESLICE_IN_USEC * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
4031 9 : qos->last_timeslice = spdk_get_ticks();
4032 9 : qos->poller = SPDK_POLLER_REGISTER(bdev_channel_poll_qos,
4033 : bdev,
4034 : SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
4035 : }
4036 :
4037 17 : ch->flags |= BDEV_CH_QOS_ENABLED;
4038 : }
4039 84 : }
4040 :
4041 : struct poll_timeout_ctx {
4042 : struct spdk_bdev_desc *desc;
4043 : uint64_t timeout_in_sec;
4044 : spdk_bdev_io_timeout_cb cb_fn;
4045 : void *cb_arg;
4046 : };
4047 :
4048 : static void
4049 274 : bdev_desc_free(struct spdk_bdev_desc *desc)
4050 : {
4051 274 : spdk_spin_destroy(&desc->spinlock);
4052 274 : free(desc->media_events_buffer);
4053 274 : free(desc);
4054 274 : }
4055 :
4056 : static void
4057 8 : bdev_channel_poll_timeout_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
4058 : {
4059 8 : struct poll_timeout_ctx *ctx = _ctx;
4060 8 : struct spdk_bdev_desc *desc = ctx->desc;
4061 :
4062 8 : free(ctx);
4063 :
4064 8 : spdk_spin_lock(&desc->spinlock);
4065 8 : desc->refs--;
4066 8 : if (desc->closed == true && desc->refs == 0) {
4067 1 : spdk_spin_unlock(&desc->spinlock);
4068 1 : bdev_desc_free(desc);
4069 1 : return;
4070 : }
4071 7 : spdk_spin_unlock(&desc->spinlock);
4072 : }
4073 :
4074 : static void
4075 13 : bdev_channel_poll_timeout_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
4076 : struct spdk_io_channel *io_ch, void *_ctx)
4077 : {
4078 13 : struct poll_timeout_ctx *ctx = _ctx;
4079 13 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
4080 13 : struct spdk_bdev_desc *desc = ctx->desc;
4081 : struct spdk_bdev_io *bdev_io;
4082 : uint64_t now;
4083 :
4084 13 : spdk_spin_lock(&desc->spinlock);
4085 13 : if (desc->closed == true) {
4086 1 : spdk_spin_unlock(&desc->spinlock);
4087 1 : spdk_bdev_for_each_channel_continue(i, -1);
4088 1 : return;
4089 : }
4090 12 : spdk_spin_unlock(&desc->spinlock);
4091 :
4092 12 : now = spdk_get_ticks();
4093 22 : TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
4094 : /* Exclude any I/O that are generated via splitting. */
4095 15 : if (bdev_io->internal.cb == bdev_io_split_done) {
4096 3 : continue;
4097 : }
4098 :
4099 : /* Once we find an I/O that has not timed out, we can immediately
4100 : * exit the loop.
4101 : */
4102 24 : if (now < (bdev_io->internal.submit_tsc +
4103 12 : ctx->timeout_in_sec * spdk_get_ticks_hz())) {
4104 5 : goto end;
4105 : }
4106 :
4107 7 : if (bdev_io->internal.desc == desc) {
4108 7 : ctx->cb_fn(ctx->cb_arg, bdev_io);
4109 : }
4110 : }
4111 :
4112 7 : end:
4113 12 : spdk_bdev_for_each_channel_continue(i, 0);
4114 : }
4115 :
4116 : static int
4117 8 : bdev_poll_timeout_io(void *arg)
4118 : {
4119 8 : struct spdk_bdev_desc *desc = arg;
4120 8 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4121 : struct poll_timeout_ctx *ctx;
4122 :
4123 8 : ctx = calloc(1, sizeof(struct poll_timeout_ctx));
4124 8 : if (!ctx) {
4125 0 : SPDK_ERRLOG("failed to allocate memory\n");
4126 0 : return SPDK_POLLER_BUSY;
4127 : }
4128 8 : ctx->desc = desc;
4129 8 : ctx->cb_arg = desc->cb_arg;
4130 8 : ctx->cb_fn = desc->cb_fn;
4131 8 : ctx->timeout_in_sec = desc->timeout_in_sec;
4132 :
4133 : /* Take a ref on the descriptor in case it gets closed while we are checking
4134 : * all of the channels.
4135 : */
4136 8 : spdk_spin_lock(&desc->spinlock);
4137 8 : desc->refs++;
4138 8 : spdk_spin_unlock(&desc->spinlock);
4139 :
4140 8 : spdk_bdev_for_each_channel(bdev, bdev_channel_poll_timeout_io, ctx,
4141 : bdev_channel_poll_timeout_io_done);
4142 :
4143 8 : return SPDK_POLLER_BUSY;
4144 : }
4145 :
4146 : int
4147 5 : spdk_bdev_set_timeout(struct spdk_bdev_desc *desc, uint64_t timeout_in_sec,
4148 : spdk_bdev_io_timeout_cb cb_fn, void *cb_arg)
4149 : {
4150 5 : assert(desc->thread == spdk_get_thread());
4151 :
4152 5 : spdk_poller_unregister(&desc->io_timeout_poller);
4153 :
4154 5 : if (timeout_in_sec) {
4155 4 : assert(cb_fn != NULL);
4156 4 : desc->io_timeout_poller = SPDK_POLLER_REGISTER(bdev_poll_timeout_io,
4157 : desc,
4158 : SPDK_BDEV_IO_POLL_INTERVAL_IN_MSEC * SPDK_SEC_TO_USEC /
4159 : 1000);
4160 4 : if (desc->io_timeout_poller == NULL) {
4161 0 : SPDK_ERRLOG("can not register the desc timeout IO poller\n");
4162 0 : return -1;
4163 : }
4164 : }
4165 :
4166 5 : desc->cb_fn = cb_fn;
4167 5 : desc->cb_arg = cb_arg;
4168 5 : desc->timeout_in_sec = timeout_in_sec;
4169 :
4170 5 : return 0;
4171 : }
4172 :
4173 : static int
4174 77 : bdev_channel_create(void *io_device, void *ctx_buf)
4175 : {
4176 77 : struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
4177 77 : struct spdk_bdev_channel *ch = ctx_buf;
4178 : struct spdk_io_channel *mgmt_io_ch;
4179 : struct spdk_bdev_mgmt_channel *mgmt_ch;
4180 : struct spdk_bdev_shared_resource *shared_resource;
4181 : struct lba_range *range;
4182 :
4183 77 : ch->bdev = bdev;
4184 77 : ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
4185 77 : if (!ch->channel) {
4186 2 : return -1;
4187 : }
4188 :
4189 75 : ch->accel_channel = spdk_accel_get_io_channel();
4190 75 : if (!ch->accel_channel) {
4191 0 : spdk_put_io_channel(ch->channel);
4192 0 : return -1;
4193 : }
4194 :
4195 75 : spdk_trace_record(TRACE_BDEV_IOCH_CREATE, bdev->internal.trace_id, 0, 0,
4196 : spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
4197 :
4198 75 : assert(ch->histogram == NULL);
4199 75 : if (bdev->internal.histogram_enabled) {
4200 0 : ch->histogram = spdk_histogram_data_alloc();
4201 0 : if (ch->histogram == NULL) {
4202 0 : SPDK_ERRLOG("Could not allocate histogram\n");
4203 : }
4204 : }
4205 :
4206 75 : mgmt_io_ch = spdk_get_io_channel(&g_bdev_mgr);
4207 75 : if (!mgmt_io_ch) {
4208 0 : spdk_put_io_channel(ch->channel);
4209 0 : spdk_put_io_channel(ch->accel_channel);
4210 0 : return -1;
4211 : }
4212 :
4213 75 : mgmt_ch = __io_ch_to_bdev_mgmt_ch(mgmt_io_ch);
4214 77 : TAILQ_FOREACH(shared_resource, &mgmt_ch->shared_resources, link) {
4215 3 : if (shared_resource->shared_ch == ch->channel) {
4216 1 : spdk_put_io_channel(mgmt_io_ch);
4217 1 : shared_resource->ref++;
4218 1 : break;
4219 : }
4220 : }
4221 :
4222 75 : if (shared_resource == NULL) {
4223 74 : shared_resource = calloc(1, sizeof(*shared_resource));
4224 74 : if (shared_resource == NULL) {
4225 0 : spdk_put_io_channel(ch->channel);
4226 0 : spdk_put_io_channel(ch->accel_channel);
4227 0 : spdk_put_io_channel(mgmt_io_ch);
4228 0 : return -1;
4229 : }
4230 :
4231 74 : shared_resource->mgmt_ch = mgmt_ch;
4232 74 : shared_resource->io_outstanding = 0;
4233 74 : TAILQ_INIT(&shared_resource->nomem_io);
4234 74 : shared_resource->nomem_threshold = 0;
4235 74 : shared_resource->shared_ch = ch->channel;
4236 74 : shared_resource->ref = 1;
4237 74 : TAILQ_INSERT_TAIL(&mgmt_ch->shared_resources, shared_resource, link);
4238 : }
4239 :
4240 75 : ch->io_outstanding = 0;
4241 75 : TAILQ_INIT(&ch->locked_ranges);
4242 75 : TAILQ_INIT(&ch->qos_queued_io);
4243 75 : ch->flags = 0;
4244 75 : ch->trace_id = bdev->internal.trace_id;
4245 75 : ch->shared_resource = shared_resource;
4246 :
4247 75 : TAILQ_INIT(&ch->io_submitted);
4248 75 : TAILQ_INIT(&ch->io_locked);
4249 75 : TAILQ_INIT(&ch->io_accel_exec);
4250 75 : TAILQ_INIT(&ch->io_memory_domain);
4251 :
4252 75 : ch->stat = bdev_alloc_io_stat(false);
4253 75 : if (ch->stat == NULL) {
4254 0 : bdev_channel_destroy_resource(ch);
4255 0 : return -1;
4256 : }
4257 :
4258 75 : ch->stat->ticks_rate = spdk_get_ticks_hz();
4259 :
4260 : #ifdef SPDK_CONFIG_VTUNE
4261 : {
4262 : char *name;
4263 : __itt_init_ittlib(NULL, 0);
4264 : name = spdk_sprintf_alloc("spdk_bdev_%s_%p", ch->bdev->name, ch);
4265 : if (!name) {
4266 : bdev_channel_destroy_resource(ch);
4267 : return -1;
4268 : }
4269 : ch->handle = __itt_string_handle_create(name);
4270 : free(name);
4271 : ch->start_tsc = spdk_get_ticks();
4272 : ch->interval_tsc = spdk_get_ticks_hz() / 100;
4273 : ch->prev_stat = bdev_alloc_io_stat(false);
4274 : if (ch->prev_stat == NULL) {
4275 : bdev_channel_destroy_resource(ch);
4276 : return -1;
4277 : }
4278 : }
4279 : #endif
4280 :
4281 75 : spdk_spin_lock(&bdev->internal.spinlock);
4282 75 : bdev_enable_qos(bdev, ch);
4283 :
4284 76 : TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
4285 : struct lba_range *new_range;
4286 :
4287 1 : new_range = calloc(1, sizeof(*new_range));
4288 1 : if (new_range == NULL) {
4289 0 : spdk_spin_unlock(&bdev->internal.spinlock);
4290 0 : bdev_channel_destroy_resource(ch);
4291 0 : return -1;
4292 : }
4293 1 : new_range->length = range->length;
4294 1 : new_range->offset = range->offset;
4295 1 : new_range->locked_ctx = range->locked_ctx;
4296 1 : TAILQ_INSERT_TAIL(&ch->locked_ranges, new_range, tailq);
4297 : }
4298 :
4299 75 : spdk_spin_unlock(&bdev->internal.spinlock);
4300 :
4301 75 : return 0;
4302 : }
4303 :
4304 : static int
4305 0 : bdev_abort_all_buf_io_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
4306 : void *cb_ctx)
4307 : {
4308 0 : struct spdk_bdev_channel *bdev_ch = cb_ctx;
4309 : struct spdk_bdev_io *bdev_io;
4310 : uint64_t buf_len;
4311 :
4312 0 : bdev_io = SPDK_CONTAINEROF(entry, struct spdk_bdev_io, internal.iobuf);
4313 0 : if (bdev_io->internal.ch == bdev_ch) {
4314 0 : buf_len = bdev_io_get_max_buf_len(bdev_io, bdev_io->internal.buf.len);
4315 0 : spdk_iobuf_entry_abort(ch, entry, buf_len);
4316 0 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
4317 : }
4318 :
4319 0 : return 0;
4320 : }
4321 :
4322 : /*
4323 : * Abort I/O that are waiting on a data buffer.
4324 : */
4325 : static void
4326 98 : bdev_abort_all_buf_io(struct spdk_bdev_mgmt_channel *mgmt_ch, struct spdk_bdev_channel *ch)
4327 : {
4328 98 : spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, bdev_abort_all_buf_io_cb, ch);
4329 98 : }
4330 :
4331 : /*
4332 : * Abort I/O that are queued waiting for submission. These types of I/O are
4333 : * linked using the spdk_bdev_io link TAILQ_ENTRY.
4334 : */
4335 : static void
4336 117 : bdev_abort_all_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
4337 : {
4338 : struct spdk_bdev_io *bdev_io, *tmp;
4339 :
4340 156 : TAILQ_FOREACH_SAFE(bdev_io, queue, internal.link, tmp) {
4341 39 : if (bdev_io->internal.ch == ch) {
4342 39 : TAILQ_REMOVE(queue, bdev_io, internal.link);
4343 : /*
4344 : * spdk_bdev_io_complete() assumes that the completed I/O had
4345 : * been submitted to the bdev module. Since in this case it
4346 : * hadn't, bump io_outstanding to account for the decrement
4347 : * that spdk_bdev_io_complete() will do.
4348 : */
4349 39 : if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
4350 39 : bdev_io_increment_outstanding(ch, ch->shared_resource);
4351 : }
4352 39 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED);
4353 : }
4354 : }
4355 117 : }
4356 :
4357 : static bool
4358 18 : bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
4359 : {
4360 : struct spdk_bdev_io *bdev_io;
4361 :
4362 18 : TAILQ_FOREACH(bdev_io, queue, internal.link) {
4363 0 : if (bdev_io == bio_to_abort) {
4364 0 : TAILQ_REMOVE(queue, bio_to_abort, internal.link);
4365 0 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
4366 0 : return true;
4367 : }
4368 : }
4369 :
4370 18 : return false;
4371 : }
4372 :
4373 : static int
4374 0 : bdev_abort_buf_io_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry, void *cb_ctx)
4375 : {
4376 0 : struct spdk_bdev_io *bdev_io, *bio_to_abort = cb_ctx;
4377 : uint64_t buf_len;
4378 :
4379 0 : bdev_io = SPDK_CONTAINEROF(entry, struct spdk_bdev_io, internal.iobuf);
4380 0 : if (bdev_io == bio_to_abort) {
4381 0 : buf_len = bdev_io_get_max_buf_len(bdev_io, bdev_io->internal.buf.len);
4382 0 : spdk_iobuf_entry_abort(ch, entry, buf_len);
4383 0 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
4384 0 : return 1;
4385 : }
4386 :
4387 0 : return 0;
4388 : }
4389 :
4390 : static bool
4391 16 : bdev_abort_buf_io(struct spdk_bdev_mgmt_channel *mgmt_ch, struct spdk_bdev_io *bio_to_abort)
4392 : {
4393 : int rc;
4394 :
4395 16 : rc = spdk_iobuf_for_each_entry(&mgmt_ch->iobuf, bdev_abort_buf_io_cb, bio_to_abort);
4396 16 : return rc == 1;
4397 : }
4398 :
4399 : static void
4400 7 : bdev_qos_channel_destroy(void *cb_arg)
4401 : {
4402 7 : struct spdk_bdev_qos *qos = cb_arg;
4403 :
4404 7 : spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
4405 7 : spdk_poller_unregister(&qos->poller);
4406 :
4407 7 : SPDK_DEBUGLOG(bdev, "Free QoS %p.\n", qos);
4408 :
4409 7 : free(qos);
4410 7 : }
4411 :
4412 : static int
4413 7 : bdev_qos_destroy(struct spdk_bdev *bdev)
4414 : {
4415 : int i;
4416 :
4417 : /*
4418 : * Cleanly shutting down the QoS poller is tricky, because
4419 : * during the asynchronous operation the user could open
4420 : * a new descriptor and create a new channel, spawning
4421 : * a new QoS poller.
4422 : *
4423 : * The strategy is to create a new QoS structure here and swap it
4424 : * in. The shutdown path then continues to refer to the old one
4425 : * until it completes and then releases it.
4426 : */
4427 : struct spdk_bdev_qos *new_qos, *old_qos;
4428 :
4429 7 : old_qos = bdev->internal.qos;
4430 :
4431 7 : new_qos = calloc(1, sizeof(*new_qos));
4432 7 : if (!new_qos) {
4433 0 : SPDK_ERRLOG("Unable to allocate memory to shut down QoS.\n");
4434 0 : return -ENOMEM;
4435 : }
4436 :
4437 : /* Copy the old QoS data into the newly allocated structure */
4438 7 : memcpy(new_qos, old_qos, sizeof(*new_qos));
4439 :
4440 : /* Zero out the key parts of the QoS structure */
4441 7 : new_qos->ch = NULL;
4442 7 : new_qos->thread = NULL;
4443 7 : new_qos->poller = NULL;
4444 : /*
4445 : * The limit member of spdk_bdev_qos_limit structure is not zeroed.
4446 : * It will be used later for the new QoS structure.
4447 : */
4448 35 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4449 28 : new_qos->rate_limits[i].remaining_this_timeslice = 0;
4450 28 : new_qos->rate_limits[i].min_per_timeslice = 0;
4451 28 : new_qos->rate_limits[i].max_per_timeslice = 0;
4452 : }
4453 :
4454 7 : bdev->internal.qos = new_qos;
4455 :
4456 7 : if (old_qos->thread == NULL) {
4457 0 : free(old_qos);
4458 : } else {
4459 7 : spdk_thread_send_msg(old_qos->thread, bdev_qos_channel_destroy, old_qos);
4460 : }
4461 :
4462 : /* It is safe to continue with destroying the bdev even though the QoS channel hasn't
4463 : * been destroyed yet. The destruction path will end up waiting for the final
4464 : * channel to be put before it releases resources. */
4465 :
4466 7 : return 0;
4467 : }
4468 :
4469 : void
4470 79 : spdk_bdev_add_io_stat(struct spdk_bdev_io_stat *total, struct spdk_bdev_io_stat *add)
4471 : {
4472 79 : total->bytes_read += add->bytes_read;
4473 79 : total->num_read_ops += add->num_read_ops;
4474 79 : total->bytes_written += add->bytes_written;
4475 79 : total->num_write_ops += add->num_write_ops;
4476 79 : total->bytes_unmapped += add->bytes_unmapped;
4477 79 : total->num_unmap_ops += add->num_unmap_ops;
4478 79 : total->bytes_copied += add->bytes_copied;
4479 79 : total->num_copy_ops += add->num_copy_ops;
4480 79 : total->read_latency_ticks += add->read_latency_ticks;
4481 79 : total->write_latency_ticks += add->write_latency_ticks;
4482 79 : total->unmap_latency_ticks += add->unmap_latency_ticks;
4483 79 : total->copy_latency_ticks += add->copy_latency_ticks;
4484 79 : if (total->max_read_latency_ticks < add->max_read_latency_ticks) {
4485 7 : total->max_read_latency_ticks = add->max_read_latency_ticks;
4486 : }
4487 79 : if (total->min_read_latency_ticks > add->min_read_latency_ticks) {
4488 39 : total->min_read_latency_ticks = add->min_read_latency_ticks;
4489 : }
4490 79 : if (total->max_write_latency_ticks < add->max_write_latency_ticks) {
4491 4 : total->max_write_latency_ticks = add->max_write_latency_ticks;
4492 : }
4493 79 : if (total->min_write_latency_ticks > add->min_write_latency_ticks) {
4494 24 : total->min_write_latency_ticks = add->min_write_latency_ticks;
4495 : }
4496 79 : if (total->max_unmap_latency_ticks < add->max_unmap_latency_ticks) {
4497 0 : total->max_unmap_latency_ticks = add->max_unmap_latency_ticks;
4498 : }
4499 79 : if (total->min_unmap_latency_ticks > add->min_unmap_latency_ticks) {
4500 3 : total->min_unmap_latency_ticks = add->min_unmap_latency_ticks;
4501 : }
4502 79 : if (total->max_copy_latency_ticks < add->max_copy_latency_ticks) {
4503 0 : total->max_copy_latency_ticks = add->max_copy_latency_ticks;
4504 : }
4505 79 : if (total->min_copy_latency_ticks > add->min_copy_latency_ticks) {
4506 4 : total->min_copy_latency_ticks = add->min_copy_latency_ticks;
4507 : }
4508 79 : }
4509 :
4510 : static void
4511 5 : bdev_get_io_stat(struct spdk_bdev_io_stat *to_stat, struct spdk_bdev_io_stat *from_stat)
4512 : {
4513 5 : memcpy(to_stat, from_stat, offsetof(struct spdk_bdev_io_stat, io_error));
4514 :
4515 5 : if (to_stat->io_error != NULL && from_stat->io_error != NULL) {
4516 0 : memcpy(to_stat->io_error, from_stat->io_error,
4517 : sizeof(struct spdk_bdev_io_error_stat));
4518 : }
4519 5 : }
4520 :
4521 : void
4522 214 : spdk_bdev_reset_io_stat(struct spdk_bdev_io_stat *stat, enum spdk_bdev_reset_stat_mode mode)
4523 : {
4524 214 : if (mode == SPDK_BDEV_RESET_STAT_NONE) {
4525 5 : return;
4526 : }
4527 :
4528 209 : stat->max_read_latency_ticks = 0;
4529 209 : stat->min_read_latency_ticks = UINT64_MAX;
4530 209 : stat->max_write_latency_ticks = 0;
4531 209 : stat->min_write_latency_ticks = UINT64_MAX;
4532 209 : stat->max_unmap_latency_ticks = 0;
4533 209 : stat->min_unmap_latency_ticks = UINT64_MAX;
4534 209 : stat->max_copy_latency_ticks = 0;
4535 209 : stat->min_copy_latency_ticks = UINT64_MAX;
4536 :
4537 209 : if (mode != SPDK_BDEV_RESET_STAT_ALL) {
4538 2 : return;
4539 : }
4540 :
4541 207 : stat->bytes_read = 0;
4542 207 : stat->num_read_ops = 0;
4543 207 : stat->bytes_written = 0;
4544 207 : stat->num_write_ops = 0;
4545 207 : stat->bytes_unmapped = 0;
4546 207 : stat->num_unmap_ops = 0;
4547 207 : stat->bytes_copied = 0;
4548 207 : stat->num_copy_ops = 0;
4549 207 : stat->read_latency_ticks = 0;
4550 207 : stat->write_latency_ticks = 0;
4551 207 : stat->unmap_latency_ticks = 0;
4552 207 : stat->copy_latency_ticks = 0;
4553 :
4554 207 : if (stat->io_error != NULL) {
4555 131 : memset(stat->io_error, 0, sizeof(struct spdk_bdev_io_error_stat));
4556 : }
4557 : }
4558 :
4559 : struct spdk_bdev_io_stat *
4560 205 : bdev_alloc_io_stat(bool io_error_stat)
4561 : {
4562 : struct spdk_bdev_io_stat *stat;
4563 :
4564 205 : stat = malloc(sizeof(struct spdk_bdev_io_stat));
4565 205 : if (stat == NULL) {
4566 0 : return NULL;
4567 : }
4568 :
4569 205 : if (io_error_stat) {
4570 130 : stat->io_error = malloc(sizeof(struct spdk_bdev_io_error_stat));
4571 130 : if (stat->io_error == NULL) {
4572 0 : free(stat);
4573 0 : return NULL;
4574 : }
4575 : } else {
4576 75 : stat->io_error = NULL;
4577 : }
4578 :
4579 205 : spdk_bdev_reset_io_stat(stat, SPDK_BDEV_RESET_STAT_ALL);
4580 :
4581 205 : return stat;
4582 : }
4583 :
4584 : void
4585 205 : bdev_free_io_stat(struct spdk_bdev_io_stat *stat)
4586 : {
4587 205 : if (stat != NULL) {
4588 205 : free(stat->io_error);
4589 205 : free(stat);
4590 : }
4591 205 : }
4592 :
4593 : void
4594 0 : spdk_bdev_dump_io_stat_json(struct spdk_bdev_io_stat *stat, struct spdk_json_write_ctx *w)
4595 : {
4596 : int i;
4597 :
4598 0 : spdk_json_write_named_uint64(w, "bytes_read", stat->bytes_read);
4599 0 : spdk_json_write_named_uint64(w, "num_read_ops", stat->num_read_ops);
4600 0 : spdk_json_write_named_uint64(w, "bytes_written", stat->bytes_written);
4601 0 : spdk_json_write_named_uint64(w, "num_write_ops", stat->num_write_ops);
4602 0 : spdk_json_write_named_uint64(w, "bytes_unmapped", stat->bytes_unmapped);
4603 0 : spdk_json_write_named_uint64(w, "num_unmap_ops", stat->num_unmap_ops);
4604 0 : spdk_json_write_named_uint64(w, "bytes_copied", stat->bytes_copied);
4605 0 : spdk_json_write_named_uint64(w, "num_copy_ops", stat->num_copy_ops);
4606 0 : spdk_json_write_named_uint64(w, "read_latency_ticks", stat->read_latency_ticks);
4607 0 : spdk_json_write_named_uint64(w, "max_read_latency_ticks", stat->max_read_latency_ticks);
4608 0 : spdk_json_write_named_uint64(w, "min_read_latency_ticks",
4609 0 : stat->min_read_latency_ticks != UINT64_MAX ?
4610 : stat->min_read_latency_ticks : 0);
4611 0 : spdk_json_write_named_uint64(w, "write_latency_ticks", stat->write_latency_ticks);
4612 0 : spdk_json_write_named_uint64(w, "max_write_latency_ticks", stat->max_write_latency_ticks);
4613 0 : spdk_json_write_named_uint64(w, "min_write_latency_ticks",
4614 0 : stat->min_write_latency_ticks != UINT64_MAX ?
4615 : stat->min_write_latency_ticks : 0);
4616 0 : spdk_json_write_named_uint64(w, "unmap_latency_ticks", stat->unmap_latency_ticks);
4617 0 : spdk_json_write_named_uint64(w, "max_unmap_latency_ticks", stat->max_unmap_latency_ticks);
4618 0 : spdk_json_write_named_uint64(w, "min_unmap_latency_ticks",
4619 0 : stat->min_unmap_latency_ticks != UINT64_MAX ?
4620 : stat->min_unmap_latency_ticks : 0);
4621 0 : spdk_json_write_named_uint64(w, "copy_latency_ticks", stat->copy_latency_ticks);
4622 0 : spdk_json_write_named_uint64(w, "max_copy_latency_ticks", stat->max_copy_latency_ticks);
4623 0 : spdk_json_write_named_uint64(w, "min_copy_latency_ticks",
4624 0 : stat->min_copy_latency_ticks != UINT64_MAX ?
4625 : stat->min_copy_latency_ticks : 0);
4626 :
4627 0 : if (stat->io_error != NULL) {
4628 0 : spdk_json_write_named_object_begin(w, "io_error");
4629 0 : for (i = 0; i < -SPDK_MIN_BDEV_IO_STATUS; i++) {
4630 0 : if (stat->io_error->error_status[i] != 0) {
4631 0 : spdk_json_write_named_uint32(w, bdev_io_status_get_string(-(i + 1)),
4632 0 : stat->io_error->error_status[i]);
4633 : }
4634 : }
4635 0 : spdk_json_write_object_end(w);
4636 : }
4637 0 : }
4638 :
4639 : static void
4640 79 : bdev_channel_abort_queued_ios(struct spdk_bdev_channel *ch)
4641 : {
4642 79 : struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
4643 79 : struct spdk_bdev_mgmt_channel *mgmt_ch = shared_resource->mgmt_ch;
4644 :
4645 79 : bdev_abort_all_queued_io(&shared_resource->nomem_io, ch);
4646 79 : bdev_abort_all_buf_io(mgmt_ch, ch);
4647 79 : }
4648 :
4649 : static void
4650 75 : bdev_channel_destroy(void *io_device, void *ctx_buf)
4651 : {
4652 75 : struct spdk_bdev_channel *ch = ctx_buf;
4653 :
4654 75 : SPDK_DEBUGLOG(bdev, "Destroying channel %p for bdev %s on thread %p\n", ch, ch->bdev->name,
4655 : spdk_get_thread());
4656 :
4657 75 : spdk_trace_record(TRACE_BDEV_IOCH_DESTROY, ch->bdev->internal.trace_id, 0, 0,
4658 : spdk_thread_get_id(spdk_io_channel_get_thread(ch->channel)));
4659 :
4660 : /* This channel is going away, so add its statistics into the bdev so that they don't get lost. */
4661 75 : spdk_spin_lock(&ch->bdev->internal.spinlock);
4662 75 : spdk_bdev_add_io_stat(ch->bdev->internal.stat, ch->stat);
4663 75 : spdk_spin_unlock(&ch->bdev->internal.spinlock);
4664 :
4665 75 : bdev_channel_abort_queued_ios(ch);
4666 :
4667 75 : if (ch->histogram) {
4668 0 : spdk_histogram_data_free(ch->histogram);
4669 : }
4670 :
4671 75 : bdev_channel_destroy_resource(ch);
4672 75 : }
4673 :
4674 : /*
4675 : * If the name already exists in the global bdev name tree, RB_INSERT() returns a pointer
4676 : * to it. Hence we do not have to call bdev_get_by_name() when using this function.
4677 : */
4678 : static int
4679 263 : bdev_name_add(struct spdk_bdev_name *bdev_name, struct spdk_bdev *bdev, const char *name)
4680 : {
4681 : struct spdk_bdev_name *tmp;
4682 :
4683 263 : bdev_name->name = strdup(name);
4684 263 : if (bdev_name->name == NULL) {
4685 0 : SPDK_ERRLOG("Unable to allocate bdev name\n");
4686 0 : return -ENOMEM;
4687 : }
4688 :
4689 263 : bdev_name->bdev = bdev;
4690 :
4691 263 : spdk_spin_lock(&g_bdev_mgr.spinlock);
4692 263 : tmp = RB_INSERT(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
4693 263 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
4694 :
4695 263 : if (tmp != NULL) {
4696 4 : SPDK_ERRLOG("Bdev name %s already exists\n", name);
4697 4 : free(bdev_name->name);
4698 4 : return -EEXIST;
4699 : }
4700 :
4701 259 : return 0;
4702 : }
4703 :
4704 : static void
4705 259 : bdev_name_del_unsafe(struct spdk_bdev_name *bdev_name)
4706 : {
4707 259 : RB_REMOVE(bdev_name_tree, &g_bdev_mgr.bdev_names, bdev_name);
4708 259 : free(bdev_name->name);
4709 259 : }
4710 :
4711 : static void
4712 5 : bdev_name_del(struct spdk_bdev_name *bdev_name)
4713 : {
4714 5 : spdk_spin_lock(&g_bdev_mgr.spinlock);
4715 5 : bdev_name_del_unsafe(bdev_name);
4716 5 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
4717 5 : }
4718 :
4719 : int
4720 136 : spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
4721 : {
4722 : struct spdk_bdev_alias *tmp;
4723 : int ret;
4724 :
4725 136 : if (alias == NULL) {
4726 1 : SPDK_ERRLOG("Empty alias passed\n");
4727 1 : return -EINVAL;
4728 : }
4729 :
4730 135 : tmp = calloc(1, sizeof(*tmp));
4731 135 : if (tmp == NULL) {
4732 0 : SPDK_ERRLOG("Unable to allocate alias\n");
4733 0 : return -ENOMEM;
4734 : }
4735 :
4736 135 : ret = bdev_name_add(&tmp->alias, bdev, alias);
4737 135 : if (ret != 0) {
4738 4 : free(tmp);
4739 4 : return ret;
4740 : }
4741 :
4742 131 : TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
4743 :
4744 131 : return 0;
4745 : }
4746 :
4747 : static int
4748 132 : bdev_alias_del(struct spdk_bdev *bdev, const char *alias,
4749 : void (*alias_del_fn)(struct spdk_bdev_name *n))
4750 : {
4751 : struct spdk_bdev_alias *tmp;
4752 :
4753 137 : TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
4754 133 : if (strcmp(alias, tmp->alias.name) == 0) {
4755 128 : TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
4756 128 : alias_del_fn(&tmp->alias);
4757 128 : free(tmp);
4758 128 : return 0;
4759 : }
4760 : }
4761 :
4762 4 : return -ENOENT;
4763 : }
4764 :
4765 : int
4766 4 : spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
4767 : {
4768 : int rc;
4769 :
4770 4 : rc = bdev_alias_del(bdev, alias, bdev_name_del);
4771 4 : if (rc == -ENOENT) {
4772 2 : SPDK_INFOLOG(bdev, "Alias %s does not exist\n", alias);
4773 : }
4774 :
4775 4 : return rc;
4776 : }
4777 :
4778 : void
4779 2 : spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
4780 : {
4781 : struct spdk_bdev_alias *p, *tmp;
4782 :
4783 5 : TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
4784 3 : TAILQ_REMOVE(&bdev->aliases, p, tailq);
4785 3 : bdev_name_del(&p->alias);
4786 3 : free(p);
4787 : }
4788 2 : }
4789 :
4790 : struct spdk_io_channel *
4791 77 : spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
4792 : {
4793 77 : return spdk_get_io_channel(__bdev_to_io_dev(spdk_bdev_desc_get_bdev(desc)));
4794 : }
4795 :
4796 : void *
4797 0 : spdk_bdev_get_module_ctx(struct spdk_bdev_desc *desc)
4798 : {
4799 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
4800 0 : void *ctx = NULL;
4801 :
4802 0 : if (bdev->fn_table->get_module_ctx) {
4803 0 : ctx = bdev->fn_table->get_module_ctx(bdev->ctxt);
4804 : }
4805 :
4806 0 : return ctx;
4807 : }
4808 :
4809 : const char *
4810 0 : spdk_bdev_get_module_name(const struct spdk_bdev *bdev)
4811 : {
4812 0 : return bdev->module->name;
4813 : }
4814 :
4815 : const char *
4816 259 : spdk_bdev_get_name(const struct spdk_bdev *bdev)
4817 : {
4818 259 : return bdev->name;
4819 : }
4820 :
4821 : const char *
4822 0 : spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
4823 : {
4824 0 : return bdev->product_name;
4825 : }
4826 :
4827 : const struct spdk_bdev_aliases_list *
4828 0 : spdk_bdev_get_aliases(const struct spdk_bdev *bdev)
4829 : {
4830 0 : return &bdev->aliases;
4831 : }
4832 :
4833 : uint32_t
4834 5 : spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
4835 : {
4836 5 : return bdev->blocklen;
4837 : }
4838 :
4839 : uint32_t
4840 0 : spdk_bdev_get_write_unit_size(const struct spdk_bdev *bdev)
4841 : {
4842 0 : return bdev->write_unit_size;
4843 : }
4844 :
4845 : uint64_t
4846 0 : spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
4847 : {
4848 0 : return bdev->blockcnt;
4849 : }
4850 :
4851 : const char *
4852 0 : spdk_bdev_get_qos_rpc_type(enum spdk_bdev_qos_rate_limit_type type)
4853 : {
4854 0 : return qos_rpc_type[type];
4855 : }
4856 :
4857 : void
4858 0 : spdk_bdev_get_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
4859 : {
4860 : int i;
4861 :
4862 0 : memset(limits, 0, sizeof(*limits) * SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES);
4863 :
4864 0 : spdk_spin_lock(&bdev->internal.spinlock);
4865 0 : if (bdev->internal.qos) {
4866 0 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
4867 0 : if (bdev->internal.qos->rate_limits[i].limit !=
4868 : SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
4869 0 : limits[i] = bdev->internal.qos->rate_limits[i].limit;
4870 0 : if (bdev_qos_is_iops_rate_limit(i) == false) {
4871 : /* Change from Byte to Megabyte which is user visible. */
4872 0 : limits[i] = limits[i] / 1024 / 1024;
4873 : }
4874 : }
4875 : }
4876 : }
4877 0 : spdk_spin_unlock(&bdev->internal.spinlock);
4878 0 : }
4879 :
4880 : size_t
4881 320 : spdk_bdev_get_buf_align(const struct spdk_bdev *bdev)
4882 : {
4883 320 : return 1 << bdev->required_alignment;
4884 : }
4885 :
4886 : uint32_t
4887 0 : spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
4888 : {
4889 0 : return bdev->optimal_io_boundary;
4890 : }
4891 :
4892 : bool
4893 0 : spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
4894 : {
4895 0 : return bdev->write_cache;
4896 : }
4897 :
4898 : const struct spdk_uuid *
4899 0 : spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
4900 : {
4901 0 : return &bdev->uuid;
4902 : }
4903 :
4904 : uint16_t
4905 0 : spdk_bdev_get_acwu(const struct spdk_bdev *bdev)
4906 : {
4907 0 : return bdev->acwu;
4908 : }
4909 :
4910 : uint32_t
4911 29 : spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
4912 : {
4913 29 : return bdev->md_len;
4914 : }
4915 :
4916 : bool
4917 133 : spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
4918 : {
4919 133 : return (bdev->md_len != 0) && bdev->md_interleave;
4920 : }
4921 :
4922 : bool
4923 158 : spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
4924 : {
4925 158 : return (bdev->md_len != 0) && !bdev->md_interleave;
4926 : }
4927 :
4928 : bool
4929 0 : spdk_bdev_is_zoned(const struct spdk_bdev *bdev)
4930 : {
4931 0 : return bdev->zoned;
4932 : }
4933 :
4934 : uint32_t
4935 124 : spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
4936 : {
4937 124 : if (spdk_bdev_is_md_interleaved(bdev)) {
4938 0 : return bdev->blocklen - bdev->md_len;
4939 : } else {
4940 124 : return bdev->blocklen;
4941 : }
4942 : }
4943 :
4944 : uint32_t
4945 0 : spdk_bdev_get_physical_block_size(const struct spdk_bdev *bdev)
4946 : {
4947 0 : return bdev->phys_blocklen;
4948 : }
4949 :
4950 : static uint32_t
4951 9 : _bdev_get_block_size_with_md(const struct spdk_bdev *bdev)
4952 : {
4953 9 : if (!spdk_bdev_is_md_interleaved(bdev)) {
4954 6 : return bdev->blocklen + bdev->md_len;
4955 : } else {
4956 3 : return bdev->blocklen;
4957 : }
4958 : }
4959 :
4960 : /* We have to use the typedef in the function declaration to appease astyle. */
4961 : typedef enum spdk_dif_type spdk_dif_type_t;
4962 : typedef enum spdk_dif_pi_format spdk_dif_pi_format_t;
4963 :
4964 : spdk_dif_type_t
4965 0 : spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
4966 : {
4967 0 : if (bdev->md_len != 0) {
4968 0 : return bdev->dif_type;
4969 : } else {
4970 0 : return SPDK_DIF_DISABLE;
4971 : }
4972 : }
4973 :
4974 : spdk_dif_pi_format_t
4975 0 : spdk_bdev_get_dif_pi_format(const struct spdk_bdev *bdev)
4976 : {
4977 0 : return bdev->dif_pi_format;
4978 : }
4979 :
4980 : bool
4981 0 : spdk_bdev_is_dif_head_of_md(const struct spdk_bdev *bdev)
4982 : {
4983 0 : if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
4984 0 : return bdev->dif_is_head_of_md;
4985 : } else {
4986 0 : return false;
4987 : }
4988 : }
4989 :
4990 : bool
4991 0 : spdk_bdev_is_dif_check_enabled(const struct spdk_bdev *bdev,
4992 : enum spdk_dif_check_type check_type)
4993 : {
4994 0 : if (spdk_bdev_get_dif_type(bdev) == SPDK_DIF_DISABLE) {
4995 0 : return false;
4996 : }
4997 :
4998 0 : switch (check_type) {
4999 0 : case SPDK_DIF_CHECK_TYPE_REFTAG:
5000 0 : return (bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK) != 0;
5001 0 : case SPDK_DIF_CHECK_TYPE_APPTAG:
5002 0 : return (bdev->dif_check_flags & SPDK_DIF_FLAGS_APPTAG_CHECK) != 0;
5003 0 : case SPDK_DIF_CHECK_TYPE_GUARD:
5004 0 : return (bdev->dif_check_flags & SPDK_DIF_FLAGS_GUARD_CHECK) != 0;
5005 0 : default:
5006 0 : return false;
5007 : }
5008 : }
5009 :
5010 : static uint32_t
5011 3 : bdev_get_max_write(const struct spdk_bdev *bdev, uint64_t num_bytes)
5012 : {
5013 : uint64_t aligned_length, max_write_blocks;
5014 :
5015 3 : aligned_length = num_bytes - (spdk_bdev_get_buf_align(bdev) - 1);
5016 3 : max_write_blocks = aligned_length / _bdev_get_block_size_with_md(bdev);
5017 3 : max_write_blocks -= max_write_blocks % bdev->write_unit_size;
5018 :
5019 3 : return max_write_blocks;
5020 : }
5021 :
5022 : uint32_t
5023 1 : spdk_bdev_get_max_copy(const struct spdk_bdev *bdev)
5024 : {
5025 1 : return bdev->max_copy;
5026 : }
5027 :
5028 : uint64_t
5029 0 : spdk_bdev_get_qd(const struct spdk_bdev *bdev)
5030 : {
5031 0 : return bdev->internal.measured_queue_depth;
5032 : }
5033 :
5034 : uint64_t
5035 0 : spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
5036 : {
5037 0 : return bdev->internal.period;
5038 : }
5039 :
5040 : uint64_t
5041 0 : spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev)
5042 : {
5043 0 : return bdev->internal.weighted_io_time;
5044 : }
5045 :
5046 : uint64_t
5047 0 : spdk_bdev_get_io_time(const struct spdk_bdev *bdev)
5048 : {
5049 0 : return bdev->internal.io_time;
5050 : }
5051 :
5052 0 : union spdk_bdev_nvme_ctratt spdk_bdev_get_nvme_ctratt(struct spdk_bdev *bdev)
5053 : {
5054 0 : return bdev->ctratt;
5055 : }
5056 :
5057 : uint32_t
5058 0 : spdk_bdev_get_nvme_nsid(struct spdk_bdev *bdev)
5059 : {
5060 0 : return bdev->nsid;
5061 : }
5062 :
5063 : static void bdev_update_qd_sampling_period(void *ctx);
5064 :
5065 : static void
5066 1 : _calculate_measured_qd_cpl(struct spdk_bdev *bdev, void *_ctx, int status)
5067 : {
5068 1 : bdev->internal.measured_queue_depth = bdev->internal.temporary_queue_depth;
5069 :
5070 1 : if (bdev->internal.measured_queue_depth) {
5071 0 : bdev->internal.io_time += bdev->internal.period;
5072 0 : bdev->internal.weighted_io_time += bdev->internal.period * bdev->internal.measured_queue_depth;
5073 : }
5074 :
5075 1 : bdev->internal.qd_poll_in_progress = false;
5076 :
5077 1 : bdev_update_qd_sampling_period(bdev);
5078 1 : }
5079 :
5080 : static void
5081 1 : _calculate_measured_qd(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
5082 : struct spdk_io_channel *io_ch, void *_ctx)
5083 : {
5084 1 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(io_ch);
5085 :
5086 1 : bdev->internal.temporary_queue_depth += ch->io_outstanding;
5087 1 : spdk_bdev_for_each_channel_continue(i, 0);
5088 1 : }
5089 :
5090 : static int
5091 1 : bdev_calculate_measured_queue_depth(void *ctx)
5092 : {
5093 1 : struct spdk_bdev *bdev = ctx;
5094 :
5095 1 : bdev->internal.qd_poll_in_progress = true;
5096 1 : bdev->internal.temporary_queue_depth = 0;
5097 1 : spdk_bdev_for_each_channel(bdev, _calculate_measured_qd, bdev, _calculate_measured_qd_cpl);
5098 1 : return SPDK_POLLER_BUSY;
5099 : }
5100 :
5101 : static void
5102 5 : bdev_update_qd_sampling_period(void *ctx)
5103 : {
5104 5 : struct spdk_bdev *bdev = ctx;
5105 :
5106 5 : if (bdev->internal.period == bdev->internal.new_period) {
5107 0 : return;
5108 : }
5109 :
5110 5 : if (bdev->internal.qd_poll_in_progress) {
5111 1 : return;
5112 : }
5113 :
5114 4 : bdev->internal.period = bdev->internal.new_period;
5115 :
5116 4 : spdk_poller_unregister(&bdev->internal.qd_poller);
5117 4 : if (bdev->internal.period != 0) {
5118 2 : bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
5119 : bdev, bdev->internal.period);
5120 : } else {
5121 2 : spdk_bdev_close(bdev->internal.qd_desc);
5122 2 : bdev->internal.qd_desc = NULL;
5123 : }
5124 : }
5125 :
5126 : static void
5127 0 : _tmp_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
5128 : {
5129 0 : SPDK_NOTICELOG("Unexpected event type: %d\n", type);
5130 0 : }
5131 :
5132 : void
5133 133 : spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
5134 : {
5135 : int rc;
5136 :
5137 133 : if (bdev->internal.new_period == period) {
5138 127 : return;
5139 : }
5140 :
5141 6 : bdev->internal.new_period = period;
5142 :
5143 6 : if (bdev->internal.qd_desc != NULL) {
5144 4 : assert(bdev->internal.period != 0);
5145 :
5146 4 : spdk_thread_send_msg(bdev->internal.qd_desc->thread,
5147 : bdev_update_qd_sampling_period, bdev);
5148 4 : return;
5149 : }
5150 :
5151 2 : assert(bdev->internal.period == 0);
5152 :
5153 2 : rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), false, _tmp_bdev_event_cb,
5154 : NULL, &bdev->internal.qd_desc);
5155 2 : if (rc != 0) {
5156 0 : return;
5157 : }
5158 :
5159 2 : bdev->internal.period = period;
5160 2 : bdev->internal.qd_poller = SPDK_POLLER_REGISTER(bdev_calculate_measured_queue_depth,
5161 : bdev, period);
5162 : }
5163 :
5164 : struct bdev_get_current_qd_ctx {
5165 : uint64_t current_qd;
5166 : spdk_bdev_get_current_qd_cb cb_fn;
5167 : void *cb_arg;
5168 : };
5169 :
5170 : static void
5171 0 : bdev_get_current_qd_done(struct spdk_bdev *bdev, void *_ctx, int status)
5172 : {
5173 0 : struct bdev_get_current_qd_ctx *ctx = _ctx;
5174 :
5175 0 : ctx->cb_fn(bdev, ctx->current_qd, ctx->cb_arg, 0);
5176 :
5177 0 : free(ctx);
5178 0 : }
5179 :
5180 : static void
5181 0 : bdev_get_current_qd(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
5182 : struct spdk_io_channel *io_ch, void *_ctx)
5183 : {
5184 0 : struct bdev_get_current_qd_ctx *ctx = _ctx;
5185 0 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
5186 :
5187 0 : ctx->current_qd += bdev_ch->io_outstanding;
5188 :
5189 0 : spdk_bdev_for_each_channel_continue(i, 0);
5190 0 : }
5191 :
5192 : void
5193 0 : spdk_bdev_get_current_qd(struct spdk_bdev *bdev, spdk_bdev_get_current_qd_cb cb_fn,
5194 : void *cb_arg)
5195 : {
5196 : struct bdev_get_current_qd_ctx *ctx;
5197 :
5198 0 : assert(cb_fn != NULL);
5199 :
5200 0 : ctx = calloc(1, sizeof(*ctx));
5201 0 : if (ctx == NULL) {
5202 0 : cb_fn(bdev, 0, cb_arg, -ENOMEM);
5203 0 : return;
5204 : }
5205 :
5206 0 : ctx->cb_fn = cb_fn;
5207 0 : ctx->cb_arg = cb_arg;
5208 :
5209 0 : spdk_bdev_for_each_channel(bdev, bdev_get_current_qd, ctx, bdev_get_current_qd_done);
5210 : }
5211 :
5212 : static void
5213 25 : _event_notify(struct spdk_bdev_desc *desc, enum spdk_bdev_event_type type)
5214 : {
5215 25 : assert(desc->thread == spdk_get_thread());
5216 :
5217 25 : spdk_spin_lock(&desc->spinlock);
5218 25 : desc->refs--;
5219 25 : if (!desc->closed) {
5220 14 : spdk_spin_unlock(&desc->spinlock);
5221 14 : desc->callback.event_fn(type,
5222 : desc->bdev,
5223 : desc->callback.ctx);
5224 14 : return;
5225 11 : } else if (desc->refs == 0) {
5226 : /* This descriptor was closed after this event_notify message was sent.
5227 : * spdk_bdev_close() could not free the descriptor since this message was
5228 : * in flight, so we free it now using bdev_desc_free().
5229 : */
5230 10 : spdk_spin_unlock(&desc->spinlock);
5231 10 : bdev_desc_free(desc);
5232 10 : return;
5233 : }
5234 1 : spdk_spin_unlock(&desc->spinlock);
5235 : }
5236 :
5237 : static void
5238 25 : event_notify(struct spdk_bdev_desc *desc, spdk_msg_fn event_notify_fn)
5239 : {
5240 25 : spdk_spin_lock(&desc->spinlock);
5241 25 : desc->refs++;
5242 25 : spdk_thread_send_msg(desc->thread, event_notify_fn, desc);
5243 25 : spdk_spin_unlock(&desc->spinlock);
5244 25 : }
5245 :
5246 : static void
5247 6 : _resize_notify(void *ctx)
5248 : {
5249 6 : struct spdk_bdev_desc *desc = ctx;
5250 :
5251 6 : _event_notify(desc, SPDK_BDEV_EVENT_RESIZE);
5252 6 : }
5253 :
5254 : int
5255 11 : spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
5256 : {
5257 : struct spdk_bdev_desc *desc;
5258 : int ret;
5259 :
5260 11 : if (size == bdev->blockcnt) {
5261 0 : return 0;
5262 : }
5263 :
5264 11 : spdk_spin_lock(&bdev->internal.spinlock);
5265 :
5266 : /* bdev has open descriptors */
5267 11 : if (!TAILQ_EMPTY(&bdev->internal.open_descs) &&
5268 7 : bdev->blockcnt > size) {
5269 1 : ret = -EBUSY;
5270 : } else {
5271 10 : bdev->blockcnt = size;
5272 16 : TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
5273 6 : event_notify(desc, _resize_notify);
5274 : }
5275 10 : ret = 0;
5276 : }
5277 :
5278 11 : spdk_spin_unlock(&bdev->internal.spinlock);
5279 :
5280 11 : return ret;
5281 : }
5282 :
5283 : /*
5284 : * Convert I/O offset and length from bytes to blocks.
5285 : *
5286 : * Returns zero on success or non-zero if the byte parameters aren't divisible by the block size.
5287 : */
5288 : static uint64_t
5289 20 : bdev_bytes_to_blocks(struct spdk_bdev_desc *desc, uint64_t offset_bytes,
5290 : uint64_t *offset_blocks, uint64_t num_bytes, uint64_t *num_blocks)
5291 : {
5292 20 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5293 20 : uint32_t block_size = bdev->blocklen;
5294 : uint8_t shift_cnt;
5295 :
5296 : /* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
5297 20 : if (spdk_likely(spdk_u32_is_pow2(block_size))) {
5298 17 : shift_cnt = spdk_u32log2(block_size);
5299 17 : *offset_blocks = offset_bytes >> shift_cnt;
5300 17 : *num_blocks = num_bytes >> shift_cnt;
5301 17 : return (offset_bytes - (*offset_blocks << shift_cnt)) |
5302 17 : (num_bytes - (*num_blocks << shift_cnt));
5303 : } else {
5304 3 : *offset_blocks = offset_bytes / block_size;
5305 3 : *num_blocks = num_bytes / block_size;
5306 3 : return (offset_bytes % block_size) | (num_bytes % block_size);
5307 : }
5308 : }
5309 :
5310 : static bool
5311 689 : bdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t offset_blocks, uint64_t num_blocks)
5312 : {
5313 : /* Return failure if offset_blocks + num_blocks is less than offset_blocks; indicates there
5314 : * has been an overflow and hence the offset has been wrapped around */
5315 689 : if (offset_blocks + num_blocks < offset_blocks) {
5316 1 : return false;
5317 : }
5318 :
5319 : /* Return failure if offset_blocks + num_blocks exceeds the size of the bdev */
5320 688 : if (offset_blocks + num_blocks > bdev->blockcnt) {
5321 2 : return false;
5322 : }
5323 :
5324 686 : return true;
5325 : }
5326 :
5327 : static void
5328 2 : bdev_seek_complete_cb(void *ctx)
5329 : {
5330 2 : struct spdk_bdev_io *bdev_io = ctx;
5331 :
5332 2 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5333 2 : bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
5334 2 : }
5335 :
5336 : static int
5337 4 : bdev_seek(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5338 : uint64_t offset_blocks, enum spdk_bdev_io_type io_type,
5339 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5340 : {
5341 4 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5342 : struct spdk_bdev_io *bdev_io;
5343 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5344 :
5345 4 : assert(io_type == SPDK_BDEV_IO_TYPE_SEEK_DATA || io_type == SPDK_BDEV_IO_TYPE_SEEK_HOLE);
5346 :
5347 : /* Check if offset_blocks is valid looking at the validity of one block */
5348 4 : if (!bdev_io_valid_blocks(bdev, offset_blocks, 1)) {
5349 0 : return -EINVAL;
5350 : }
5351 :
5352 4 : bdev_io = bdev_channel_get_io(channel);
5353 4 : if (!bdev_io) {
5354 0 : return -ENOMEM;
5355 : }
5356 :
5357 4 : bdev_io->internal.ch = channel;
5358 4 : bdev_io->internal.desc = desc;
5359 4 : bdev_io->type = io_type;
5360 4 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5361 4 : bdev_io->u.bdev.memory_domain = NULL;
5362 4 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5363 4 : bdev_io->u.bdev.accel_sequence = NULL;
5364 4 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5365 :
5366 4 : if (!spdk_bdev_io_type_supported(bdev, io_type)) {
5367 : /* In case bdev doesn't support seek to next data/hole offset,
5368 : * it is assumed that only data and no holes are present */
5369 2 : if (io_type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
5370 1 : bdev_io->u.bdev.seek.offset = offset_blocks;
5371 : } else {
5372 1 : bdev_io->u.bdev.seek.offset = UINT64_MAX;
5373 : }
5374 :
5375 2 : spdk_thread_send_msg(spdk_get_thread(), bdev_seek_complete_cb, bdev_io);
5376 2 : return 0;
5377 : }
5378 :
5379 2 : bdev_io_submit(bdev_io);
5380 2 : return 0;
5381 : }
5382 :
5383 : int
5384 2 : spdk_bdev_seek_data(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5385 : uint64_t offset_blocks,
5386 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5387 : {
5388 2 : return bdev_seek(desc, ch, offset_blocks, SPDK_BDEV_IO_TYPE_SEEK_DATA, cb, cb_arg);
5389 : }
5390 :
5391 : int
5392 2 : spdk_bdev_seek_hole(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5393 : uint64_t offset_blocks,
5394 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5395 : {
5396 2 : return bdev_seek(desc, ch, offset_blocks, SPDK_BDEV_IO_TYPE_SEEK_HOLE, cb, cb_arg);
5397 : }
5398 :
5399 : uint64_t
5400 4 : spdk_bdev_io_get_seek_offset(const struct spdk_bdev_io *bdev_io)
5401 : {
5402 4 : return bdev_io->u.bdev.seek.offset;
5403 : }
5404 :
5405 : static int
5406 204 : bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
5407 : void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5408 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5409 : {
5410 204 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5411 : struct spdk_bdev_io *bdev_io;
5412 204 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5413 :
5414 204 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5415 0 : return -EINVAL;
5416 : }
5417 :
5418 204 : bdev_io = bdev_channel_get_io(channel);
5419 204 : if (!bdev_io) {
5420 1 : return -ENOMEM;
5421 : }
5422 :
5423 203 : bdev_io->internal.ch = channel;
5424 203 : bdev_io->internal.desc = desc;
5425 203 : bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
5426 203 : bdev_io->u.bdev.iovs = &bdev_io->iov;
5427 203 : bdev_io->u.bdev.iovs[0].iov_base = buf;
5428 203 : bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
5429 203 : bdev_io->u.bdev.iovcnt = 1;
5430 203 : bdev_io->u.bdev.md_buf = md_buf;
5431 203 : bdev_io->u.bdev.num_blocks = num_blocks;
5432 203 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5433 203 : bdev_io->u.bdev.memory_domain = NULL;
5434 203 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5435 203 : bdev_io->u.bdev.accel_sequence = NULL;
5436 203 : bdev_io->u.bdev.dif_check_flags = bdev->dif_check_flags;
5437 203 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5438 :
5439 203 : bdev_io_submit(bdev_io);
5440 203 : return 0;
5441 : }
5442 :
5443 : int
5444 3 : spdk_bdev_read(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5445 : void *buf, uint64_t offset, uint64_t nbytes,
5446 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5447 : {
5448 3 : uint64_t offset_blocks, num_blocks;
5449 :
5450 3 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
5451 0 : return -EINVAL;
5452 : }
5453 :
5454 3 : return spdk_bdev_read_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
5455 : }
5456 :
5457 : int
5458 200 : spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5459 : void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5460 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5461 : {
5462 200 : return bdev_read_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks, cb, cb_arg);
5463 : }
5464 :
5465 : int
5466 4 : spdk_bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5467 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5468 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5469 : {
5470 4 : struct iovec iov = {
5471 : .iov_base = buf,
5472 : };
5473 :
5474 4 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5475 0 : return -EINVAL;
5476 : }
5477 :
5478 4 : if (md_buf && !_is_buf_allocated(&iov)) {
5479 0 : return -EINVAL;
5480 : }
5481 :
5482 4 : return bdev_read_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
5483 : cb, cb_arg);
5484 : }
5485 :
5486 : int
5487 5 : spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5488 : struct iovec *iov, int iovcnt,
5489 : uint64_t offset, uint64_t nbytes,
5490 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5491 : {
5492 5 : uint64_t offset_blocks, num_blocks;
5493 :
5494 5 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
5495 0 : return -EINVAL;
5496 : }
5497 :
5498 5 : return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
5499 : }
5500 :
5501 : static int
5502 226 : bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5503 : struct iovec *iov, int iovcnt, void *md_buf, uint64_t offset_blocks,
5504 : uint64_t num_blocks, struct spdk_memory_domain *domain, void *domain_ctx,
5505 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
5506 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5507 : {
5508 226 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5509 : struct spdk_bdev_io *bdev_io;
5510 226 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5511 :
5512 226 : if (spdk_unlikely(!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks))) {
5513 0 : return -EINVAL;
5514 : }
5515 :
5516 226 : bdev_io = bdev_channel_get_io(channel);
5517 226 : if (spdk_unlikely(!bdev_io)) {
5518 2 : return -ENOMEM;
5519 : }
5520 :
5521 224 : bdev_io->internal.ch = channel;
5522 224 : bdev_io->internal.desc = desc;
5523 224 : bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
5524 224 : bdev_io->u.bdev.iovs = iov;
5525 224 : bdev_io->u.bdev.iovcnt = iovcnt;
5526 224 : bdev_io->u.bdev.md_buf = md_buf;
5527 224 : bdev_io->u.bdev.num_blocks = num_blocks;
5528 224 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5529 224 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5530 :
5531 224 : if (seq != NULL) {
5532 0 : bdev_io->internal.f.has_accel_sequence = true;
5533 0 : bdev_io->internal.accel_sequence = seq;
5534 : }
5535 :
5536 224 : if (domain != NULL) {
5537 2 : bdev_io->internal.f.has_memory_domain = true;
5538 2 : bdev_io->internal.memory_domain = domain;
5539 2 : bdev_io->internal.memory_domain_ctx = domain_ctx;
5540 : }
5541 :
5542 224 : bdev_io->u.bdev.memory_domain = domain;
5543 224 : bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
5544 224 : bdev_io->u.bdev.accel_sequence = seq;
5545 224 : bdev_io->u.bdev.dif_check_flags = dif_check_flags;
5546 :
5547 224 : _bdev_io_submit_ext(desc, bdev_io);
5548 :
5549 224 : return 0;
5550 : }
5551 :
5552 : int
5553 21 : spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5554 : struct iovec *iov, int iovcnt,
5555 : uint64_t offset_blocks, uint64_t num_blocks,
5556 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5557 : {
5558 21 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5559 :
5560 21 : return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5561 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, cb, cb_arg);
5562 : }
5563 :
5564 : int
5565 4 : spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5566 : struct iovec *iov, int iovcnt, void *md_buf,
5567 : uint64_t offset_blocks, uint64_t num_blocks,
5568 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5569 : {
5570 4 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5571 :
5572 4 : if (md_buf && !spdk_bdev_is_md_separate(bdev)) {
5573 0 : return -EINVAL;
5574 : }
5575 :
5576 4 : if (md_buf && !_is_buf_allocated(iov)) {
5577 0 : return -EINVAL;
5578 : }
5579 :
5580 4 : return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
5581 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, cb, cb_arg);
5582 : }
5583 :
5584 : static inline bool
5585 14 : _bdev_io_check_opts(struct spdk_bdev_ext_io_opts *opts, struct iovec *iov)
5586 : {
5587 : /*
5588 : * We check if opts size is at least of size when we first introduced
5589 : * spdk_bdev_ext_io_opts (ac6f2bdd8d) since access to those members
5590 : * are not checked internal.
5591 : */
5592 14 : return opts->size >= offsetof(struct spdk_bdev_ext_io_opts, metadata) +
5593 10 : sizeof(opts->metadata) &&
5594 22 : opts->size <= sizeof(*opts) &&
5595 : /* When memory domain is used, the user must provide data buffers */
5596 8 : (!opts->memory_domain || (iov && iov[0].iov_base));
5597 : }
5598 :
5599 : int
5600 8 : spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5601 : struct iovec *iov, int iovcnt,
5602 : uint64_t offset_blocks, uint64_t num_blocks,
5603 : spdk_bdev_io_completion_cb cb, void *cb_arg,
5604 : struct spdk_bdev_ext_io_opts *opts)
5605 : {
5606 8 : struct spdk_memory_domain *domain = NULL;
5607 8 : struct spdk_accel_sequence *seq = NULL;
5608 8 : void *domain_ctx = NULL, *md = NULL;
5609 8 : uint32_t dif_check_flags = 0;
5610 8 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5611 :
5612 8 : if (opts) {
5613 7 : if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
5614 3 : return -EINVAL;
5615 : }
5616 :
5617 4 : md = opts->metadata;
5618 4 : domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
5619 4 : domain_ctx = bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL);
5620 4 : seq = bdev_get_ext_io_opt(opts, accel_sequence, NULL);
5621 4 : if (md) {
5622 4 : if (spdk_unlikely(!spdk_bdev_is_md_separate(bdev))) {
5623 0 : return -EINVAL;
5624 : }
5625 :
5626 4 : if (spdk_unlikely(!_is_buf_allocated(iov))) {
5627 0 : return -EINVAL;
5628 : }
5629 :
5630 4 : if (spdk_unlikely(seq != NULL)) {
5631 0 : return -EINVAL;
5632 : }
5633 : }
5634 : }
5635 :
5636 10 : dif_check_flags = bdev->dif_check_flags &
5637 5 : ~(bdev_get_ext_io_opt(opts, dif_check_flags_exclude_mask, 0));
5638 :
5639 5 : return bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks,
5640 : num_blocks, domain, domain_ctx, seq, dif_check_flags, cb, cb_arg);
5641 : }
5642 :
5643 : static int
5644 36 : bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5645 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5646 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5647 : {
5648 36 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5649 : struct spdk_bdev_io *bdev_io;
5650 36 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5651 :
5652 36 : if (!desc->write) {
5653 0 : return -EBADF;
5654 : }
5655 :
5656 36 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5657 0 : return -EINVAL;
5658 : }
5659 :
5660 36 : bdev_io = bdev_channel_get_io(channel);
5661 36 : if (!bdev_io) {
5662 0 : return -ENOMEM;
5663 : }
5664 :
5665 36 : bdev_io->internal.ch = channel;
5666 36 : bdev_io->internal.desc = desc;
5667 36 : bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
5668 36 : bdev_io->u.bdev.iovs = &bdev_io->iov;
5669 36 : bdev_io->u.bdev.iovs[0].iov_base = buf;
5670 36 : bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
5671 36 : bdev_io->u.bdev.iovcnt = 1;
5672 36 : bdev_io->u.bdev.md_buf = md_buf;
5673 36 : bdev_io->u.bdev.num_blocks = num_blocks;
5674 36 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5675 36 : bdev_io->u.bdev.memory_domain = NULL;
5676 36 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5677 36 : bdev_io->u.bdev.accel_sequence = NULL;
5678 36 : bdev_io->u.bdev.dif_check_flags = bdev->dif_check_flags;
5679 36 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5680 :
5681 36 : bdev_io_submit(bdev_io);
5682 36 : return 0;
5683 : }
5684 :
5685 : int
5686 3 : spdk_bdev_write(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5687 : void *buf, uint64_t offset, uint64_t nbytes,
5688 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5689 : {
5690 3 : uint64_t offset_blocks, num_blocks;
5691 :
5692 3 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
5693 0 : return -EINVAL;
5694 : }
5695 :
5696 3 : return spdk_bdev_write_blocks(desc, ch, buf, offset_blocks, num_blocks, cb, cb_arg);
5697 : }
5698 :
5699 : int
5700 27 : spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5701 : void *buf, uint64_t offset_blocks, uint64_t num_blocks,
5702 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5703 : {
5704 27 : return bdev_write_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
5705 : cb, cb_arg);
5706 : }
5707 :
5708 : int
5709 3 : spdk_bdev_write_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5710 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
5711 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5712 : {
5713 3 : struct iovec iov = {
5714 : .iov_base = buf,
5715 : };
5716 :
5717 3 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
5718 0 : return -EINVAL;
5719 : }
5720 :
5721 3 : if (md_buf && !_is_buf_allocated(&iov)) {
5722 0 : return -EINVAL;
5723 : }
5724 :
5725 3 : return bdev_write_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
5726 : cb, cb_arg);
5727 : }
5728 :
5729 : static int
5730 70 : bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5731 : struct iovec *iov, int iovcnt, void *md_buf,
5732 : uint64_t offset_blocks, uint64_t num_blocks,
5733 : struct spdk_memory_domain *domain, void *domain_ctx,
5734 : struct spdk_accel_sequence *seq, uint32_t dif_check_flags,
5735 : uint32_t nvme_cdw12_raw, uint32_t nvme_cdw13_raw,
5736 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5737 : {
5738 70 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5739 : struct spdk_bdev_io *bdev_io;
5740 70 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5741 :
5742 70 : if (spdk_unlikely(!desc->write)) {
5743 0 : return -EBADF;
5744 : }
5745 :
5746 70 : if (spdk_unlikely(!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks))) {
5747 0 : return -EINVAL;
5748 : }
5749 :
5750 70 : bdev_io = bdev_channel_get_io(channel);
5751 70 : if (spdk_unlikely(!bdev_io)) {
5752 2 : return -ENOMEM;
5753 : }
5754 :
5755 68 : bdev_io->internal.ch = channel;
5756 68 : bdev_io->internal.desc = desc;
5757 68 : bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
5758 68 : bdev_io->u.bdev.iovs = iov;
5759 68 : bdev_io->u.bdev.iovcnt = iovcnt;
5760 68 : bdev_io->u.bdev.md_buf = md_buf;
5761 68 : bdev_io->u.bdev.num_blocks = num_blocks;
5762 68 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5763 68 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5764 68 : if (seq != NULL) {
5765 0 : bdev_io->internal.f.has_accel_sequence = true;
5766 0 : bdev_io->internal.accel_sequence = seq;
5767 : }
5768 :
5769 68 : if (domain != NULL) {
5770 2 : bdev_io->internal.f.has_memory_domain = true;
5771 2 : bdev_io->internal.memory_domain = domain;
5772 2 : bdev_io->internal.memory_domain_ctx = domain_ctx;
5773 : }
5774 :
5775 68 : bdev_io->u.bdev.memory_domain = domain;
5776 68 : bdev_io->u.bdev.memory_domain_ctx = domain_ctx;
5777 68 : bdev_io->u.bdev.accel_sequence = seq;
5778 68 : bdev_io->u.bdev.dif_check_flags = dif_check_flags;
5779 68 : bdev_io->u.bdev.nvme_cdw12.raw = nvme_cdw12_raw;
5780 68 : bdev_io->u.bdev.nvme_cdw13.raw = nvme_cdw13_raw;
5781 :
5782 68 : _bdev_io_submit_ext(desc, bdev_io);
5783 :
5784 68 : return 0;
5785 : }
5786 :
5787 : int
5788 3 : spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5789 : struct iovec *iov, int iovcnt,
5790 : uint64_t offset, uint64_t len,
5791 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5792 : {
5793 3 : uint64_t offset_blocks, num_blocks;
5794 :
5795 3 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, len, &num_blocks) != 0) {
5796 0 : return -EINVAL;
5797 : }
5798 :
5799 3 : return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
5800 : }
5801 :
5802 : int
5803 14 : spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5804 : struct iovec *iov, int iovcnt,
5805 : uint64_t offset_blocks, uint64_t num_blocks,
5806 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5807 : {
5808 14 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5809 :
5810 14 : return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5811 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, 0, 0,
5812 : cb, cb_arg);
5813 : }
5814 :
5815 : int
5816 1 : spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5817 : struct iovec *iov, int iovcnt, void *md_buf,
5818 : uint64_t offset_blocks, uint64_t num_blocks,
5819 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5820 : {
5821 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5822 :
5823 1 : if (md_buf && !spdk_bdev_is_md_separate(bdev)) {
5824 0 : return -EINVAL;
5825 : }
5826 :
5827 1 : if (md_buf && !_is_buf_allocated(iov)) {
5828 0 : return -EINVAL;
5829 : }
5830 :
5831 1 : return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
5832 : num_blocks, NULL, NULL, NULL, bdev->dif_check_flags, 0, 0,
5833 : cb, cb_arg);
5834 : }
5835 :
5836 : int
5837 8 : spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5838 : struct iovec *iov, int iovcnt,
5839 : uint64_t offset_blocks, uint64_t num_blocks,
5840 : spdk_bdev_io_completion_cb cb, void *cb_arg,
5841 : struct spdk_bdev_ext_io_opts *opts)
5842 : {
5843 8 : struct spdk_memory_domain *domain = NULL;
5844 8 : struct spdk_accel_sequence *seq = NULL;
5845 8 : void *domain_ctx = NULL, *md = NULL;
5846 8 : uint32_t dif_check_flags = 0;
5847 8 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5848 8 : uint32_t nvme_cdw12_raw = 0;
5849 8 : uint32_t nvme_cdw13_raw = 0;
5850 :
5851 8 : if (opts) {
5852 7 : if (spdk_unlikely(!_bdev_io_check_opts(opts, iov))) {
5853 3 : return -EINVAL;
5854 : }
5855 4 : md = opts->metadata;
5856 4 : domain = bdev_get_ext_io_opt(opts, memory_domain, NULL);
5857 4 : domain_ctx = bdev_get_ext_io_opt(opts, memory_domain_ctx, NULL);
5858 4 : seq = bdev_get_ext_io_opt(opts, accel_sequence, NULL);
5859 4 : nvme_cdw12_raw = bdev_get_ext_io_opt(opts, nvme_cdw12.raw, 0);
5860 4 : nvme_cdw13_raw = bdev_get_ext_io_opt(opts, nvme_cdw13.raw, 0);
5861 4 : if (md) {
5862 4 : if (spdk_unlikely(!spdk_bdev_is_md_separate(bdev))) {
5863 0 : return -EINVAL;
5864 : }
5865 :
5866 4 : if (spdk_unlikely(!_is_buf_allocated(iov))) {
5867 0 : return -EINVAL;
5868 : }
5869 :
5870 4 : if (spdk_unlikely(seq != NULL)) {
5871 0 : return -EINVAL;
5872 : }
5873 : }
5874 : }
5875 :
5876 10 : dif_check_flags = bdev->dif_check_flags &
5877 5 : ~(bdev_get_ext_io_opt(opts, dif_check_flags_exclude_mask, 0));
5878 :
5879 5 : return bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, md, offset_blocks, num_blocks,
5880 : domain, domain_ctx, seq, dif_check_flags,
5881 : nvme_cdw12_raw, nvme_cdw13_raw, cb, cb_arg);
5882 : }
5883 :
5884 : static void
5885 11 : bdev_compare_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5886 : {
5887 11 : struct spdk_bdev_io *parent_io = cb_arg;
5888 11 : struct spdk_bdev *bdev = parent_io->bdev;
5889 11 : uint8_t *read_buf = bdev_io->u.bdev.iovs[0].iov_base;
5890 11 : int i, rc = 0;
5891 :
5892 11 : if (!success) {
5893 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5894 0 : parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
5895 0 : spdk_bdev_free_io(bdev_io);
5896 0 : return;
5897 : }
5898 :
5899 17 : for (i = 0; i < parent_io->u.bdev.iovcnt; i++) {
5900 11 : rc = memcmp(read_buf,
5901 11 : parent_io->u.bdev.iovs[i].iov_base,
5902 11 : parent_io->u.bdev.iovs[i].iov_len);
5903 11 : if (rc) {
5904 5 : break;
5905 : }
5906 6 : read_buf += parent_io->u.bdev.iovs[i].iov_len;
5907 : }
5908 :
5909 11 : if (rc == 0 && parent_io->u.bdev.md_buf && spdk_bdev_is_md_separate(bdev)) {
5910 2 : rc = memcmp(bdev_io->u.bdev.md_buf,
5911 2 : parent_io->u.bdev.md_buf,
5912 2 : spdk_bdev_get_md_size(bdev));
5913 : }
5914 :
5915 11 : spdk_bdev_free_io(bdev_io);
5916 :
5917 11 : if (rc == 0) {
5918 5 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
5919 5 : parent_io->internal.cb(parent_io, true, parent_io->internal.caller_ctx);
5920 : } else {
5921 6 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
5922 6 : parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
5923 : }
5924 : }
5925 :
5926 : static void
5927 11 : bdev_compare_do_read(void *_bdev_io)
5928 : {
5929 11 : struct spdk_bdev_io *bdev_io = _bdev_io;
5930 : int rc;
5931 :
5932 11 : rc = spdk_bdev_read_blocks(bdev_io->internal.desc,
5933 11 : spdk_io_channel_from_ctx(bdev_io->internal.ch), NULL,
5934 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
5935 : bdev_compare_do_read_done, bdev_io);
5936 :
5937 11 : if (rc == -ENOMEM) {
5938 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_do_read);
5939 11 : } else if (rc != 0) {
5940 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
5941 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
5942 : }
5943 11 : }
5944 :
5945 : static int
5946 16 : bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5947 : struct iovec *iov, int iovcnt, void *md_buf,
5948 : uint64_t offset_blocks, uint64_t num_blocks,
5949 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5950 : {
5951 16 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
5952 : struct spdk_bdev_io *bdev_io;
5953 16 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
5954 :
5955 16 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
5956 0 : return -EINVAL;
5957 : }
5958 :
5959 16 : bdev_io = bdev_channel_get_io(channel);
5960 16 : if (!bdev_io) {
5961 0 : return -ENOMEM;
5962 : }
5963 :
5964 16 : bdev_io->internal.ch = channel;
5965 16 : bdev_io->internal.desc = desc;
5966 16 : bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
5967 16 : bdev_io->u.bdev.iovs = iov;
5968 16 : bdev_io->u.bdev.iovcnt = iovcnt;
5969 16 : bdev_io->u.bdev.md_buf = md_buf;
5970 16 : bdev_io->u.bdev.num_blocks = num_blocks;
5971 16 : bdev_io->u.bdev.offset_blocks = offset_blocks;
5972 16 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
5973 16 : bdev_io->u.bdev.memory_domain = NULL;
5974 16 : bdev_io->u.bdev.memory_domain_ctx = NULL;
5975 16 : bdev_io->u.bdev.accel_sequence = NULL;
5976 :
5977 16 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
5978 7 : bdev_io_submit(bdev_io);
5979 7 : return 0;
5980 : }
5981 :
5982 9 : bdev_compare_do_read(bdev_io);
5983 :
5984 9 : return 0;
5985 : }
5986 :
5987 : int
5988 10 : spdk_bdev_comparev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5989 : struct iovec *iov, int iovcnt,
5990 : uint64_t offset_blocks, uint64_t num_blocks,
5991 : spdk_bdev_io_completion_cb cb, void *cb_arg)
5992 : {
5993 10 : return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks,
5994 : num_blocks, cb, cb_arg);
5995 : }
5996 :
5997 : int
5998 6 : spdk_bdev_comparev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
5999 : struct iovec *iov, int iovcnt, void *md_buf,
6000 : uint64_t offset_blocks, uint64_t num_blocks,
6001 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6002 : {
6003 6 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
6004 0 : return -EINVAL;
6005 : }
6006 :
6007 6 : if (md_buf && !_is_buf_allocated(iov)) {
6008 0 : return -EINVAL;
6009 : }
6010 :
6011 6 : return bdev_comparev_blocks_with_md(desc, ch, iov, iovcnt, md_buf, offset_blocks,
6012 : num_blocks, cb, cb_arg);
6013 : }
6014 :
6015 : static int
6016 4 : bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6017 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
6018 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6019 : {
6020 4 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6021 : struct spdk_bdev_io *bdev_io;
6022 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6023 :
6024 4 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6025 0 : return -EINVAL;
6026 : }
6027 :
6028 4 : bdev_io = bdev_channel_get_io(channel);
6029 4 : if (!bdev_io) {
6030 0 : return -ENOMEM;
6031 : }
6032 :
6033 4 : bdev_io->internal.ch = channel;
6034 4 : bdev_io->internal.desc = desc;
6035 4 : bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE;
6036 4 : bdev_io->u.bdev.iovs = &bdev_io->iov;
6037 4 : bdev_io->u.bdev.iovs[0].iov_base = buf;
6038 4 : bdev_io->u.bdev.iovs[0].iov_len = num_blocks * bdev->blocklen;
6039 4 : bdev_io->u.bdev.iovcnt = 1;
6040 4 : bdev_io->u.bdev.md_buf = md_buf;
6041 4 : bdev_io->u.bdev.num_blocks = num_blocks;
6042 4 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6043 4 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6044 4 : bdev_io->u.bdev.memory_domain = NULL;
6045 4 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6046 4 : bdev_io->u.bdev.accel_sequence = NULL;
6047 :
6048 4 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE)) {
6049 2 : bdev_io_submit(bdev_io);
6050 2 : return 0;
6051 : }
6052 :
6053 2 : bdev_compare_do_read(bdev_io);
6054 :
6055 2 : return 0;
6056 : }
6057 :
6058 : int
6059 4 : spdk_bdev_compare_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6060 : void *buf, uint64_t offset_blocks, uint64_t num_blocks,
6061 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6062 : {
6063 4 : return bdev_compare_blocks_with_md(desc, ch, buf, NULL, offset_blocks, num_blocks,
6064 : cb, cb_arg);
6065 : }
6066 :
6067 : int
6068 0 : spdk_bdev_compare_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6069 : void *buf, void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
6070 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6071 : {
6072 0 : struct iovec iov = {
6073 : .iov_base = buf,
6074 : };
6075 :
6076 0 : if (md_buf && !spdk_bdev_is_md_separate(spdk_bdev_desc_get_bdev(desc))) {
6077 0 : return -EINVAL;
6078 : }
6079 :
6080 0 : if (md_buf && !_is_buf_allocated(&iov)) {
6081 0 : return -EINVAL;
6082 : }
6083 :
6084 0 : return bdev_compare_blocks_with_md(desc, ch, buf, md_buf, offset_blocks, num_blocks,
6085 : cb, cb_arg);
6086 : }
6087 :
6088 : static void
6089 2 : bdev_comparev_and_writev_blocks_unlocked(struct lba_range *range, void *ctx, int unlock_status)
6090 : {
6091 2 : struct spdk_bdev_io *bdev_io = ctx;
6092 :
6093 2 : if (unlock_status) {
6094 0 : SPDK_ERRLOG("LBA range unlock failed\n");
6095 : }
6096 :
6097 2 : bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS ? true :
6098 : false, bdev_io->internal.caller_ctx);
6099 2 : }
6100 :
6101 : static void
6102 2 : bdev_comparev_and_writev_blocks_unlock(struct spdk_bdev_io *bdev_io, int status)
6103 : {
6104 2 : bdev_io->internal.status = status;
6105 :
6106 2 : bdev_unlock_lba_range(bdev_io->internal.desc, spdk_io_channel_from_ctx(bdev_io->internal.ch),
6107 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6108 : bdev_comparev_and_writev_blocks_unlocked, bdev_io);
6109 2 : }
6110 :
6111 : static void
6112 1 : bdev_compare_and_write_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6113 : {
6114 1 : struct spdk_bdev_io *parent_io = cb_arg;
6115 :
6116 1 : if (!success) {
6117 0 : SPDK_ERRLOG("Compare and write operation failed\n");
6118 : }
6119 :
6120 1 : spdk_bdev_free_io(bdev_io);
6121 :
6122 1 : bdev_comparev_and_writev_blocks_unlock(parent_io,
6123 : success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
6124 1 : }
6125 :
6126 : static void
6127 1 : bdev_compare_and_write_do_write(void *_bdev_io)
6128 : {
6129 1 : struct spdk_bdev_io *bdev_io = _bdev_io;
6130 : int rc;
6131 :
6132 1 : rc = spdk_bdev_writev_blocks(bdev_io->internal.desc,
6133 1 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
6134 : bdev_io->u.bdev.fused_iovs, bdev_io->u.bdev.fused_iovcnt,
6135 : bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6136 : bdev_compare_and_write_do_write_done, bdev_io);
6137 :
6138 :
6139 1 : if (rc == -ENOMEM) {
6140 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_write);
6141 1 : } else if (rc != 0) {
6142 0 : bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
6143 : }
6144 1 : }
6145 :
6146 : static void
6147 2 : bdev_compare_and_write_do_compare_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6148 : {
6149 2 : struct spdk_bdev_io *parent_io = cb_arg;
6150 :
6151 2 : spdk_bdev_free_io(bdev_io);
6152 :
6153 2 : if (!success) {
6154 1 : bdev_comparev_and_writev_blocks_unlock(parent_io, SPDK_BDEV_IO_STATUS_MISCOMPARE);
6155 1 : return;
6156 : }
6157 :
6158 1 : bdev_compare_and_write_do_write(parent_io);
6159 : }
6160 :
6161 : static void
6162 2 : bdev_compare_and_write_do_compare(void *_bdev_io)
6163 : {
6164 2 : struct spdk_bdev_io *bdev_io = _bdev_io;
6165 : int rc;
6166 :
6167 2 : rc = spdk_bdev_comparev_blocks(bdev_io->internal.desc,
6168 2 : spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs,
6169 : bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks,
6170 : bdev_compare_and_write_do_compare_done, bdev_io);
6171 :
6172 2 : if (rc == -ENOMEM) {
6173 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_compare_and_write_do_compare);
6174 2 : } else if (rc != 0) {
6175 0 : bdev_comparev_and_writev_blocks_unlock(bdev_io, SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED);
6176 : }
6177 2 : }
6178 :
6179 : static void
6180 2 : bdev_comparev_and_writev_blocks_locked(struct lba_range *range, void *ctx, int status)
6181 : {
6182 2 : struct spdk_bdev_io *bdev_io = ctx;
6183 :
6184 2 : if (status) {
6185 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED;
6186 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
6187 0 : return;
6188 : }
6189 :
6190 2 : bdev_compare_and_write_do_compare(bdev_io);
6191 : }
6192 :
6193 : int
6194 2 : spdk_bdev_comparev_and_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6195 : struct iovec *compare_iov, int compare_iovcnt,
6196 : struct iovec *write_iov, int write_iovcnt,
6197 : uint64_t offset_blocks, uint64_t num_blocks,
6198 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6199 : {
6200 2 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6201 : struct spdk_bdev_io *bdev_io;
6202 2 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6203 :
6204 2 : if (!desc->write) {
6205 0 : return -EBADF;
6206 : }
6207 :
6208 2 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6209 0 : return -EINVAL;
6210 : }
6211 :
6212 2 : if (num_blocks > bdev->acwu) {
6213 0 : return -EINVAL;
6214 : }
6215 :
6216 2 : bdev_io = bdev_channel_get_io(channel);
6217 2 : if (!bdev_io) {
6218 0 : return -ENOMEM;
6219 : }
6220 :
6221 2 : bdev_io->internal.ch = channel;
6222 2 : bdev_io->internal.desc = desc;
6223 2 : bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
6224 2 : bdev_io->u.bdev.iovs = compare_iov;
6225 2 : bdev_io->u.bdev.iovcnt = compare_iovcnt;
6226 2 : bdev_io->u.bdev.fused_iovs = write_iov;
6227 2 : bdev_io->u.bdev.fused_iovcnt = write_iovcnt;
6228 2 : bdev_io->u.bdev.md_buf = NULL;
6229 2 : bdev_io->u.bdev.num_blocks = num_blocks;
6230 2 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6231 2 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6232 2 : bdev_io->u.bdev.memory_domain = NULL;
6233 2 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6234 2 : bdev_io->u.bdev.accel_sequence = NULL;
6235 :
6236 2 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE)) {
6237 0 : bdev_io_submit(bdev_io);
6238 0 : return 0;
6239 : }
6240 :
6241 2 : return bdev_lock_lba_range(desc, ch, offset_blocks, num_blocks,
6242 : bdev_comparev_and_writev_blocks_locked, bdev_io);
6243 : }
6244 :
6245 : int
6246 2 : spdk_bdev_zcopy_start(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6247 : struct iovec *iov, int iovcnt,
6248 : uint64_t offset_blocks, uint64_t num_blocks,
6249 : bool populate,
6250 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6251 : {
6252 2 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6253 : struct spdk_bdev_io *bdev_io;
6254 2 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6255 :
6256 2 : if (!desc->write) {
6257 0 : return -EBADF;
6258 : }
6259 :
6260 2 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6261 0 : return -EINVAL;
6262 : }
6263 :
6264 2 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY)) {
6265 0 : return -ENOTSUP;
6266 : }
6267 :
6268 2 : bdev_io = bdev_channel_get_io(channel);
6269 2 : if (!bdev_io) {
6270 0 : return -ENOMEM;
6271 : }
6272 :
6273 2 : bdev_io->internal.ch = channel;
6274 2 : bdev_io->internal.desc = desc;
6275 2 : bdev_io->type = SPDK_BDEV_IO_TYPE_ZCOPY;
6276 2 : bdev_io->u.bdev.num_blocks = num_blocks;
6277 2 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6278 2 : bdev_io->u.bdev.iovs = iov;
6279 2 : bdev_io->u.bdev.iovcnt = iovcnt;
6280 2 : bdev_io->u.bdev.md_buf = NULL;
6281 2 : bdev_io->u.bdev.zcopy.populate = populate ? 1 : 0;
6282 2 : bdev_io->u.bdev.zcopy.commit = 0;
6283 2 : bdev_io->u.bdev.zcopy.start = 1;
6284 2 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6285 2 : bdev_io->u.bdev.memory_domain = NULL;
6286 2 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6287 2 : bdev_io->u.bdev.accel_sequence = NULL;
6288 :
6289 2 : bdev_io_submit(bdev_io);
6290 :
6291 2 : return 0;
6292 : }
6293 :
6294 : int
6295 2 : spdk_bdev_zcopy_end(struct spdk_bdev_io *bdev_io, bool commit,
6296 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6297 : {
6298 2 : if (bdev_io->type != SPDK_BDEV_IO_TYPE_ZCOPY) {
6299 0 : return -EINVAL;
6300 : }
6301 :
6302 2 : bdev_io->u.bdev.zcopy.commit = commit ? 1 : 0;
6303 2 : bdev_io->u.bdev.zcopy.start = 0;
6304 2 : bdev_io->internal.caller_ctx = cb_arg;
6305 2 : bdev_io->internal.cb = cb;
6306 2 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
6307 :
6308 2 : bdev_io_submit(bdev_io);
6309 :
6310 2 : return 0;
6311 : }
6312 :
6313 : int
6314 0 : spdk_bdev_write_zeroes(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6315 : uint64_t offset, uint64_t len,
6316 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6317 : {
6318 0 : uint64_t offset_blocks, num_blocks;
6319 :
6320 0 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, len, &num_blocks) != 0) {
6321 0 : return -EINVAL;
6322 : }
6323 :
6324 0 : return spdk_bdev_write_zeroes_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6325 : }
6326 :
6327 : int
6328 33 : spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6329 : uint64_t offset_blocks, uint64_t num_blocks,
6330 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6331 : {
6332 33 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6333 : struct spdk_bdev_io *bdev_io;
6334 33 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6335 :
6336 33 : if (!desc->write) {
6337 0 : return -EBADF;
6338 : }
6339 :
6340 33 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6341 0 : return -EINVAL;
6342 : }
6343 :
6344 33 : if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) &&
6345 10 : !bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE)) {
6346 1 : return -ENOTSUP;
6347 : }
6348 :
6349 32 : bdev_io = bdev_channel_get_io(channel);
6350 :
6351 32 : if (!bdev_io) {
6352 0 : return -ENOMEM;
6353 : }
6354 :
6355 32 : bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
6356 32 : bdev_io->internal.ch = channel;
6357 32 : bdev_io->internal.desc = desc;
6358 32 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6359 32 : bdev_io->u.bdev.num_blocks = num_blocks;
6360 32 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6361 32 : bdev_io->u.bdev.memory_domain = NULL;
6362 32 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6363 32 : bdev_io->u.bdev.accel_sequence = NULL;
6364 :
6365 : /* If the write_zeroes size is large and should be split, use the generic split
6366 : * logic regardless of whether SPDK_BDEV_IO_TYPE_WRITE_ZEREOS is supported or not.
6367 : *
6368 : * Then, send the write_zeroes request if SPDK_BDEV_IO_TYPE_WRITE_ZEROES is supported
6369 : * or emulate it using regular write request otherwise.
6370 : */
6371 32 : if (bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) ||
6372 : bdev_io->internal.f.split) {
6373 26 : bdev_io_submit(bdev_io);
6374 26 : return 0;
6375 : }
6376 :
6377 6 : assert(_bdev_get_block_size_with_md(bdev) <= ZERO_BUFFER_SIZE);
6378 :
6379 6 : return bdev_write_zero_buffer(bdev_io);
6380 : }
6381 :
6382 : int
6383 0 : spdk_bdev_unmap(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6384 : uint64_t offset, uint64_t nbytes,
6385 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6386 : {
6387 0 : uint64_t offset_blocks, num_blocks;
6388 :
6389 0 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, nbytes, &num_blocks) != 0) {
6390 0 : return -EINVAL;
6391 : }
6392 :
6393 0 : return spdk_bdev_unmap_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6394 : }
6395 :
6396 : static void
6397 0 : bdev_io_complete_cb(void *ctx)
6398 : {
6399 0 : struct spdk_bdev_io *bdev_io = ctx;
6400 :
6401 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
6402 0 : bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
6403 0 : }
6404 :
6405 : int
6406 22 : spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6407 : uint64_t offset_blocks, uint64_t num_blocks,
6408 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6409 : {
6410 22 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6411 : struct spdk_bdev_io *bdev_io;
6412 22 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6413 :
6414 22 : if (!desc->write) {
6415 0 : return -EBADF;
6416 : }
6417 :
6418 22 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6419 0 : return -EINVAL;
6420 : }
6421 :
6422 22 : bdev_io = bdev_channel_get_io(channel);
6423 22 : if (!bdev_io) {
6424 0 : return -ENOMEM;
6425 : }
6426 :
6427 22 : bdev_io->internal.ch = channel;
6428 22 : bdev_io->internal.desc = desc;
6429 22 : bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
6430 :
6431 22 : bdev_io->u.bdev.iovs = &bdev_io->iov;
6432 22 : bdev_io->u.bdev.iovs[0].iov_base = NULL;
6433 22 : bdev_io->u.bdev.iovs[0].iov_len = 0;
6434 22 : bdev_io->u.bdev.iovcnt = 1;
6435 :
6436 22 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6437 22 : bdev_io->u.bdev.num_blocks = num_blocks;
6438 22 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6439 22 : bdev_io->u.bdev.memory_domain = NULL;
6440 22 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6441 22 : bdev_io->u.bdev.accel_sequence = NULL;
6442 :
6443 22 : if (num_blocks == 0) {
6444 0 : spdk_thread_send_msg(spdk_get_thread(), bdev_io_complete_cb, bdev_io);
6445 0 : return 0;
6446 : }
6447 :
6448 22 : bdev_io_submit(bdev_io);
6449 22 : return 0;
6450 : }
6451 :
6452 : int
6453 0 : spdk_bdev_flush(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6454 : uint64_t offset, uint64_t length,
6455 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6456 : {
6457 0 : uint64_t offset_blocks, num_blocks;
6458 :
6459 0 : if (bdev_bytes_to_blocks(desc, offset, &offset_blocks, length, &num_blocks) != 0) {
6460 0 : return -EINVAL;
6461 : }
6462 :
6463 0 : return spdk_bdev_flush_blocks(desc, ch, offset_blocks, num_blocks, cb, cb_arg);
6464 : }
6465 :
6466 : int
6467 2 : spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6468 : uint64_t offset_blocks, uint64_t num_blocks,
6469 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6470 : {
6471 2 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6472 : struct spdk_bdev_io *bdev_io;
6473 2 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6474 :
6475 2 : if (!desc->write) {
6476 0 : return -EBADF;
6477 : }
6478 :
6479 2 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH))) {
6480 0 : return -ENOTSUP;
6481 : }
6482 :
6483 2 : if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
6484 0 : return -EINVAL;
6485 : }
6486 :
6487 2 : bdev_io = bdev_channel_get_io(channel);
6488 2 : if (!bdev_io) {
6489 0 : return -ENOMEM;
6490 : }
6491 :
6492 2 : bdev_io->internal.ch = channel;
6493 2 : bdev_io->internal.desc = desc;
6494 2 : bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
6495 2 : bdev_io->u.bdev.iovs = NULL;
6496 2 : bdev_io->u.bdev.iovcnt = 0;
6497 2 : bdev_io->u.bdev.offset_blocks = offset_blocks;
6498 2 : bdev_io->u.bdev.num_blocks = num_blocks;
6499 2 : bdev_io->u.bdev.memory_domain = NULL;
6500 2 : bdev_io->u.bdev.memory_domain_ctx = NULL;
6501 2 : bdev_io->u.bdev.accel_sequence = NULL;
6502 2 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6503 :
6504 2 : bdev_io_submit(bdev_io);
6505 2 : return 0;
6506 : }
6507 :
6508 : static int bdev_reset_poll_for_outstanding_io(void *ctx);
6509 :
6510 : static void
6511 13 : bdev_reset_check_outstanding_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
6512 : {
6513 13 : struct spdk_bdev_io *bdev_io = _ctx;
6514 13 : struct spdk_bdev_channel *ch = bdev_io->internal.ch;
6515 :
6516 13 : if (status == -EBUSY) {
6517 9 : if (spdk_get_ticks() < bdev_io->u.reset.wait_poller.stop_time_tsc) {
6518 8 : bdev_io->u.reset.wait_poller.poller = SPDK_POLLER_REGISTER(bdev_reset_poll_for_outstanding_io,
6519 : bdev_io, BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
6520 : } else {
6521 1 : if (TAILQ_EMPTY(&ch->io_memory_domain) && TAILQ_EMPTY(&ch->io_accel_exec)) {
6522 : /* If outstanding IOs are still present and reset_io_drain_timeout
6523 : * seconds passed, start the reset. */
6524 1 : bdev_io_submit_reset(bdev_io);
6525 : } else {
6526 : /* We still have in progress memory domain pull/push or we're
6527 : * executing accel sequence. Since we cannot abort either of those
6528 : * operations, fail the reset request. */
6529 0 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
6530 : }
6531 : }
6532 : } else {
6533 4 : SPDK_DEBUGLOG(bdev,
6534 : "Skipping reset for underlying device of bdev: %s - no outstanding I/O.\n",
6535 : ch->bdev->name);
6536 : /* Mark the completion status as a SUCCESS and complete the reset. */
6537 4 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
6538 : }
6539 13 : }
6540 :
6541 : static void
6542 13 : bdev_reset_check_outstanding_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6543 : struct spdk_io_channel *io_ch, void *_ctx)
6544 : {
6545 13 : struct spdk_bdev_channel *cur_ch = __io_ch_to_bdev_ch(io_ch);
6546 13 : int status = 0;
6547 :
6548 13 : if (cur_ch->io_outstanding > 0 ||
6549 4 : !TAILQ_EMPTY(&cur_ch->io_memory_domain) ||
6550 4 : !TAILQ_EMPTY(&cur_ch->io_accel_exec)) {
6551 : /* If a channel has outstanding IO, set status to -EBUSY code. This will stop
6552 : * further iteration over the rest of the channels and pass non-zero status
6553 : * to the callback function. */
6554 9 : status = -EBUSY;
6555 : }
6556 13 : spdk_bdev_for_each_channel_continue(i, status);
6557 13 : }
6558 :
6559 : static int
6560 8 : bdev_reset_poll_for_outstanding_io(void *ctx)
6561 : {
6562 8 : struct spdk_bdev_io *bdev_io = ctx;
6563 :
6564 8 : spdk_poller_unregister(&bdev_io->u.reset.wait_poller.poller);
6565 8 : spdk_bdev_for_each_channel(bdev_io->bdev, bdev_reset_check_outstanding_io, bdev_io,
6566 : bdev_reset_check_outstanding_io_done);
6567 :
6568 8 : return SPDK_POLLER_BUSY;
6569 : }
6570 :
6571 : static void
6572 16 : bdev_reset_freeze_channel_done(struct spdk_bdev *bdev, void *_ctx, int status)
6573 : {
6574 16 : struct spdk_bdev_io *bdev_io = _ctx;
6575 :
6576 16 : if (bdev->reset_io_drain_timeout == 0) {
6577 11 : bdev_io_submit_reset(bdev_io);
6578 11 : return;
6579 : }
6580 :
6581 5 : bdev_io->u.reset.wait_poller.stop_time_tsc = spdk_get_ticks() +
6582 5 : (bdev->reset_io_drain_timeout * spdk_get_ticks_hz());
6583 :
6584 : /* In case bdev->reset_io_drain_timeout is not equal to zero,
6585 : * submit the reset to the underlying module only if outstanding I/O
6586 : * remain after reset_io_drain_timeout seconds have passed. */
6587 5 : spdk_bdev_for_each_channel(bdev, bdev_reset_check_outstanding_io, bdev_io,
6588 : bdev_reset_check_outstanding_io_done);
6589 : }
6590 :
6591 : static void
6592 19 : bdev_reset_freeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6593 : struct spdk_io_channel *ch, void *_ctx)
6594 : {
6595 : struct spdk_bdev_channel *channel;
6596 : struct spdk_bdev_mgmt_channel *mgmt_channel;
6597 : struct spdk_bdev_shared_resource *shared_resource;
6598 19 : bdev_io_tailq_t tmp_queued;
6599 :
6600 19 : TAILQ_INIT(&tmp_queued);
6601 :
6602 19 : channel = __io_ch_to_bdev_ch(ch);
6603 19 : shared_resource = channel->shared_resource;
6604 19 : mgmt_channel = shared_resource->mgmt_ch;
6605 :
6606 19 : channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
6607 :
6608 19 : if ((channel->flags & BDEV_CH_QOS_ENABLED) != 0) {
6609 2 : TAILQ_SWAP(&channel->qos_queued_io, &tmp_queued, spdk_bdev_io, internal.link);
6610 : }
6611 :
6612 19 : bdev_abort_all_queued_io(&shared_resource->nomem_io, channel);
6613 19 : bdev_abort_all_buf_io(mgmt_channel, channel);
6614 19 : bdev_abort_all_queued_io(&tmp_queued, channel);
6615 :
6616 19 : spdk_bdev_for_each_channel_continue(i, 0);
6617 19 : }
6618 :
6619 : static void
6620 18 : bdev_start_reset(struct spdk_bdev_io *bdev_io)
6621 : {
6622 18 : struct spdk_bdev *bdev = bdev_io->bdev;
6623 18 : bool freeze_channel = false;
6624 :
6625 18 : bdev_ch_add_to_io_submitted(bdev_io);
6626 :
6627 : /**
6628 : * Take a channel reference for the target bdev for the life of this
6629 : * reset. This guards against the channel getting destroyed before
6630 : * the reset is completed. We will release the reference when this
6631 : * reset is completed.
6632 : */
6633 18 : bdev_io->u.reset.ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev));
6634 :
6635 18 : spdk_spin_lock(&bdev->internal.spinlock);
6636 18 : if (bdev->internal.reset_in_progress == NULL) {
6637 16 : bdev->internal.reset_in_progress = bdev_io;
6638 16 : freeze_channel = true;
6639 : } else {
6640 2 : TAILQ_INSERT_TAIL(&bdev->internal.queued_resets, bdev_io, internal.link);
6641 : }
6642 18 : spdk_spin_unlock(&bdev->internal.spinlock);
6643 :
6644 18 : if (freeze_channel) {
6645 16 : spdk_bdev_for_each_channel(bdev, bdev_reset_freeze_channel, bdev_io,
6646 : bdev_reset_freeze_channel_done);
6647 : }
6648 18 : }
6649 :
6650 : int
6651 18 : spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6652 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6653 : {
6654 18 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6655 : struct spdk_bdev_io *bdev_io;
6656 18 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6657 :
6658 18 : bdev_io = bdev_channel_get_io(channel);
6659 18 : if (!bdev_io) {
6660 0 : return -ENOMEM;
6661 : }
6662 :
6663 18 : bdev_io->internal.ch = channel;
6664 18 : bdev_io->internal.desc = desc;
6665 18 : bdev_io->internal.submit_tsc = spdk_get_ticks();
6666 18 : bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
6667 18 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6668 :
6669 18 : bdev_start_reset(bdev_io);
6670 18 : return 0;
6671 : }
6672 :
6673 : void
6674 0 : spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
6675 : struct spdk_bdev_io_stat *stat, enum spdk_bdev_reset_stat_mode reset_mode)
6676 : {
6677 0 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6678 :
6679 0 : bdev_get_io_stat(stat, channel->stat);
6680 0 : spdk_bdev_reset_io_stat(channel->stat, reset_mode);
6681 0 : }
6682 :
6683 : static void
6684 5 : bdev_get_device_stat_done(struct spdk_bdev *bdev, void *_ctx, int status)
6685 : {
6686 5 : struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = _ctx;
6687 :
6688 5 : bdev_iostat_ctx->cb(bdev, bdev_iostat_ctx->stat,
6689 : bdev_iostat_ctx->cb_arg, 0);
6690 5 : free(bdev_iostat_ctx);
6691 5 : }
6692 :
6693 : static void
6694 4 : bdev_get_each_channel_stat(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6695 : struct spdk_io_channel *ch, void *_ctx)
6696 : {
6697 4 : struct spdk_bdev_iostat_ctx *bdev_iostat_ctx = _ctx;
6698 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6699 :
6700 4 : spdk_bdev_add_io_stat(bdev_iostat_ctx->stat, channel->stat);
6701 4 : spdk_bdev_reset_io_stat(channel->stat, bdev_iostat_ctx->reset_mode);
6702 4 : spdk_bdev_for_each_channel_continue(i, 0);
6703 4 : }
6704 :
6705 : void
6706 5 : spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
6707 : enum spdk_bdev_reset_stat_mode reset_mode, spdk_bdev_get_device_stat_cb cb, void *cb_arg)
6708 : {
6709 : struct spdk_bdev_iostat_ctx *bdev_iostat_ctx;
6710 :
6711 5 : assert(bdev != NULL);
6712 5 : assert(stat != NULL);
6713 5 : assert(cb != NULL);
6714 :
6715 5 : bdev_iostat_ctx = calloc(1, sizeof(struct spdk_bdev_iostat_ctx));
6716 5 : if (bdev_iostat_ctx == NULL) {
6717 0 : SPDK_ERRLOG("Unable to allocate memory for spdk_bdev_iostat_ctx\n");
6718 0 : cb(bdev, stat, cb_arg, -ENOMEM);
6719 0 : return;
6720 : }
6721 :
6722 5 : bdev_iostat_ctx->stat = stat;
6723 5 : bdev_iostat_ctx->cb = cb;
6724 5 : bdev_iostat_ctx->cb_arg = cb_arg;
6725 5 : bdev_iostat_ctx->reset_mode = reset_mode;
6726 :
6727 : /* Start with the statistics from previously deleted channels. */
6728 5 : spdk_spin_lock(&bdev->internal.spinlock);
6729 5 : bdev_get_io_stat(bdev_iostat_ctx->stat, bdev->internal.stat);
6730 5 : spdk_bdev_reset_io_stat(bdev->internal.stat, reset_mode);
6731 5 : spdk_spin_unlock(&bdev->internal.spinlock);
6732 :
6733 : /* Then iterate and add the statistics from each existing channel. */
6734 5 : spdk_bdev_for_each_channel(bdev, bdev_get_each_channel_stat, bdev_iostat_ctx,
6735 : bdev_get_device_stat_done);
6736 : }
6737 :
6738 : struct bdev_iostat_reset_ctx {
6739 : enum spdk_bdev_reset_stat_mode mode;
6740 : bdev_reset_device_stat_cb cb;
6741 : void *cb_arg;
6742 : };
6743 :
6744 : static void
6745 0 : bdev_reset_device_stat_done(struct spdk_bdev *bdev, void *_ctx, int status)
6746 : {
6747 0 : struct bdev_iostat_reset_ctx *ctx = _ctx;
6748 :
6749 0 : ctx->cb(bdev, ctx->cb_arg, 0);
6750 :
6751 0 : free(ctx);
6752 0 : }
6753 :
6754 : static void
6755 0 : bdev_reset_each_channel_stat(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
6756 : struct spdk_io_channel *ch, void *_ctx)
6757 : {
6758 0 : struct bdev_iostat_reset_ctx *ctx = _ctx;
6759 0 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6760 :
6761 0 : spdk_bdev_reset_io_stat(channel->stat, ctx->mode);
6762 :
6763 0 : spdk_bdev_for_each_channel_continue(i, 0);
6764 0 : }
6765 :
6766 : void
6767 0 : bdev_reset_device_stat(struct spdk_bdev *bdev, enum spdk_bdev_reset_stat_mode mode,
6768 : bdev_reset_device_stat_cb cb, void *cb_arg)
6769 : {
6770 : struct bdev_iostat_reset_ctx *ctx;
6771 :
6772 0 : assert(bdev != NULL);
6773 0 : assert(cb != NULL);
6774 :
6775 0 : ctx = calloc(1, sizeof(*ctx));
6776 0 : if (ctx == NULL) {
6777 0 : SPDK_ERRLOG("Unable to allocate bdev_iostat_reset_ctx.\n");
6778 0 : cb(bdev, cb_arg, -ENOMEM);
6779 0 : return;
6780 : }
6781 :
6782 0 : ctx->mode = mode;
6783 0 : ctx->cb = cb;
6784 0 : ctx->cb_arg = cb_arg;
6785 :
6786 0 : spdk_spin_lock(&bdev->internal.spinlock);
6787 0 : spdk_bdev_reset_io_stat(bdev->internal.stat, mode);
6788 0 : spdk_spin_unlock(&bdev->internal.spinlock);
6789 :
6790 0 : spdk_bdev_for_each_channel(bdev,
6791 : bdev_reset_each_channel_stat,
6792 : ctx,
6793 : bdev_reset_device_stat_done);
6794 : }
6795 :
6796 : int
6797 1 : spdk_bdev_nvme_admin_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6798 : const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
6799 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6800 : {
6801 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6802 : struct spdk_bdev_io *bdev_io;
6803 1 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6804 :
6805 1 : if (!desc->write) {
6806 0 : return -EBADF;
6807 : }
6808 :
6809 1 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN))) {
6810 1 : return -ENOTSUP;
6811 : }
6812 :
6813 0 : bdev_io = bdev_channel_get_io(channel);
6814 0 : if (!bdev_io) {
6815 0 : return -ENOMEM;
6816 : }
6817 :
6818 0 : bdev_io->internal.ch = channel;
6819 0 : bdev_io->internal.desc = desc;
6820 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
6821 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
6822 0 : bdev_io->u.nvme_passthru.buf = buf;
6823 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
6824 0 : bdev_io->u.nvme_passthru.md_buf = NULL;
6825 0 : bdev_io->u.nvme_passthru.md_len = 0;
6826 :
6827 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6828 :
6829 0 : bdev_io_submit(bdev_io);
6830 0 : return 0;
6831 : }
6832 :
6833 : int
6834 1 : spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6835 : const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
6836 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6837 : {
6838 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6839 : struct spdk_bdev_io *bdev_io;
6840 1 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6841 :
6842 1 : if (!desc->write) {
6843 : /*
6844 : * Do not try to parse the NVMe command - we could maybe use bits in the opcode
6845 : * to easily determine if the command is a read or write, but for now just
6846 : * do not allow io_passthru with a read-only descriptor.
6847 : */
6848 0 : return -EBADF;
6849 : }
6850 :
6851 1 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
6852 1 : return -ENOTSUP;
6853 : }
6854 :
6855 0 : bdev_io = bdev_channel_get_io(channel);
6856 0 : if (!bdev_io) {
6857 0 : return -ENOMEM;
6858 : }
6859 :
6860 0 : bdev_io->internal.ch = channel;
6861 0 : bdev_io->internal.desc = desc;
6862 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO;
6863 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
6864 0 : bdev_io->u.nvme_passthru.buf = buf;
6865 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
6866 0 : bdev_io->u.nvme_passthru.md_buf = NULL;
6867 0 : bdev_io->u.nvme_passthru.md_len = 0;
6868 :
6869 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6870 :
6871 0 : bdev_io_submit(bdev_io);
6872 0 : return 0;
6873 : }
6874 :
6875 : int
6876 1 : spdk_bdev_nvme_io_passthru_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
6877 : const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len,
6878 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6879 : {
6880 1 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6881 : struct spdk_bdev_io *bdev_io;
6882 1 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6883 :
6884 1 : if (!desc->write) {
6885 : /*
6886 : * Do not try to parse the NVMe command - we could maybe use bits in the opcode
6887 : * to easily determine if the command is a read or write, but for now just
6888 : * do not allow io_passthru with a read-only descriptor.
6889 : */
6890 0 : return -EBADF;
6891 : }
6892 :
6893 1 : if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
6894 1 : return -ENOTSUP;
6895 : }
6896 :
6897 0 : bdev_io = bdev_channel_get_io(channel);
6898 0 : if (!bdev_io) {
6899 0 : return -ENOMEM;
6900 : }
6901 :
6902 0 : bdev_io->internal.ch = channel;
6903 0 : bdev_io->internal.desc = desc;
6904 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IO_MD;
6905 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
6906 0 : bdev_io->u.nvme_passthru.buf = buf;
6907 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
6908 0 : bdev_io->u.nvme_passthru.md_buf = md_buf;
6909 0 : bdev_io->u.nvme_passthru.md_len = md_len;
6910 :
6911 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6912 :
6913 0 : bdev_io_submit(bdev_io);
6914 0 : return 0;
6915 : }
6916 :
6917 : int
6918 0 : spdk_bdev_nvme_iov_passthru_md(struct spdk_bdev_desc *desc,
6919 : struct spdk_io_channel *ch,
6920 : const struct spdk_nvme_cmd *cmd,
6921 : struct iovec *iov, int iovcnt, size_t nbytes,
6922 : void *md_buf, size_t md_len,
6923 : spdk_bdev_io_completion_cb cb, void *cb_arg)
6924 : {
6925 0 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
6926 : struct spdk_bdev_io *bdev_io;
6927 0 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
6928 :
6929 0 : if (!desc->write) {
6930 : /*
6931 : * Do not try to parse the NVMe command - we could maybe use bits in the opcode
6932 : * to easily determine if the command is a read or write, but for now just
6933 : * do not allow io_passthru with a read-only descriptor.
6934 : */
6935 0 : return -EBADF;
6936 : }
6937 :
6938 0 : if (md_buf && spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO_MD))) {
6939 0 : return -ENOTSUP;
6940 0 : } else if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_NVME_IO))) {
6941 0 : return -ENOTSUP;
6942 : }
6943 :
6944 0 : bdev_io = bdev_channel_get_io(channel);
6945 0 : if (!bdev_io) {
6946 0 : return -ENOMEM;
6947 : }
6948 :
6949 0 : bdev_io->internal.ch = channel;
6950 0 : bdev_io->internal.desc = desc;
6951 0 : bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_IOV_MD;
6952 0 : bdev_io->u.nvme_passthru.cmd = *cmd;
6953 0 : bdev_io->u.nvme_passthru.iovs = iov;
6954 0 : bdev_io->u.nvme_passthru.iovcnt = iovcnt;
6955 0 : bdev_io->u.nvme_passthru.nbytes = nbytes;
6956 0 : bdev_io->u.nvme_passthru.md_buf = md_buf;
6957 0 : bdev_io->u.nvme_passthru.md_len = md_len;
6958 :
6959 0 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
6960 :
6961 0 : bdev_io_submit(bdev_io);
6962 0 : return 0;
6963 : }
6964 :
6965 : static void bdev_abort_retry(void *ctx);
6966 : static void bdev_abort(struct spdk_bdev_io *parent_io);
6967 :
6968 : static void
6969 22 : bdev_abort_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
6970 : {
6971 22 : struct spdk_bdev_channel *channel = bdev_io->internal.ch;
6972 22 : struct spdk_bdev_io *parent_io = cb_arg;
6973 : struct spdk_bdev_io *bio_to_abort, *tmp_io;
6974 :
6975 22 : bio_to_abort = bdev_io->u.abort.bio_to_abort;
6976 :
6977 22 : spdk_bdev_free_io(bdev_io);
6978 :
6979 22 : if (!success) {
6980 : /* Check if the target I/O completed in the meantime. */
6981 2 : TAILQ_FOREACH(tmp_io, &channel->io_submitted, internal.ch_link) {
6982 1 : if (tmp_io == bio_to_abort) {
6983 0 : break;
6984 : }
6985 : }
6986 :
6987 : /* If the target I/O still exists, set the parent to failed. */
6988 1 : if (tmp_io != NULL) {
6989 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
6990 : }
6991 : }
6992 :
6993 22 : assert(parent_io->internal.f.split);
6994 :
6995 22 : parent_io->internal.split.outstanding--;
6996 22 : if (parent_io->internal.split.outstanding == 0) {
6997 16 : if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
6998 0 : bdev_abort_retry(parent_io);
6999 : } else {
7000 16 : bdev_io_complete(parent_io);
7001 : }
7002 : }
7003 22 : }
7004 :
7005 : static int
7006 23 : bdev_abort_io(struct spdk_bdev_desc *desc, struct spdk_bdev_channel *channel,
7007 : struct spdk_bdev_io *bio_to_abort,
7008 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7009 : {
7010 23 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7011 : struct spdk_bdev_io *bdev_io;
7012 :
7013 23 : if (bio_to_abort->type == SPDK_BDEV_IO_TYPE_ABORT ||
7014 23 : bio_to_abort->type == SPDK_BDEV_IO_TYPE_RESET) {
7015 : /* TODO: Abort reset or abort request. */
7016 0 : return -ENOTSUP;
7017 : }
7018 :
7019 23 : bdev_io = bdev_channel_get_io(channel);
7020 23 : if (bdev_io == NULL) {
7021 1 : return -ENOMEM;
7022 : }
7023 :
7024 22 : bdev_io->internal.ch = channel;
7025 22 : bdev_io->internal.desc = desc;
7026 22 : bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
7027 22 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7028 :
7029 22 : if (bio_to_abort->internal.f.split) {
7030 6 : assert(bdev_io_should_split(bio_to_abort));
7031 6 : bdev_io->u.bdev.abort.bio_cb_arg = bio_to_abort;
7032 :
7033 : /* Parent abort request is not submitted directly, but to manage its
7034 : * execution add it to the submitted list here.
7035 : */
7036 6 : bdev_io->internal.submit_tsc = spdk_get_ticks();
7037 6 : bdev_ch_add_to_io_submitted(bdev_io);
7038 :
7039 6 : bdev_abort(bdev_io);
7040 :
7041 6 : return 0;
7042 : }
7043 :
7044 16 : bdev_io->u.abort.bio_to_abort = bio_to_abort;
7045 :
7046 : /* Submit the abort request to the underlying bdev module. */
7047 16 : bdev_io_submit(bdev_io);
7048 :
7049 16 : return 0;
7050 : }
7051 :
7052 : static bool
7053 46 : bdev_io_on_tailq(struct spdk_bdev_io *bdev_io, bdev_io_tailq_t *tailq)
7054 : {
7055 : struct spdk_bdev_io *iter;
7056 :
7057 46 : TAILQ_FOREACH(iter, tailq, internal.link) {
7058 0 : if (iter == bdev_io) {
7059 0 : return true;
7060 : }
7061 : }
7062 :
7063 46 : return false;
7064 : }
7065 :
7066 : static uint32_t
7067 18 : _bdev_abort(struct spdk_bdev_io *parent_io)
7068 : {
7069 18 : struct spdk_bdev_desc *desc = parent_io->internal.desc;
7070 18 : struct spdk_bdev_channel *channel = parent_io->internal.ch;
7071 : void *bio_cb_arg;
7072 : struct spdk_bdev_io *bio_to_abort;
7073 : uint32_t matched_ios;
7074 : int rc;
7075 :
7076 18 : bio_cb_arg = parent_io->u.bdev.abort.bio_cb_arg;
7077 :
7078 : /* matched_ios is returned and will be kept by the caller.
7079 : *
7080 : * This function will be used for two cases, 1) the same cb_arg is used for
7081 : * multiple I/Os, 2) a single large I/O is split into smaller ones.
7082 : * Incrementing split_outstanding directly here may confuse readers especially
7083 : * for the 1st case.
7084 : *
7085 : * Completion of I/O abort is processed after stack unwinding. Hence this trick
7086 : * works as expected.
7087 : */
7088 18 : matched_ios = 0;
7089 18 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
7090 :
7091 105 : TAILQ_FOREACH(bio_to_abort, &channel->io_submitted, internal.ch_link) {
7092 88 : if (bio_to_abort->internal.caller_ctx != bio_cb_arg) {
7093 65 : continue;
7094 : }
7095 :
7096 23 : if (bio_to_abort->internal.submit_tsc > parent_io->internal.submit_tsc) {
7097 : /* Any I/O which was submitted after this abort command should be excluded. */
7098 0 : continue;
7099 : }
7100 :
7101 : /* We can't abort a request that's being pushed/pulled or executed by accel */
7102 46 : if (bdev_io_on_tailq(bio_to_abort, &channel->io_accel_exec) ||
7103 23 : bdev_io_on_tailq(bio_to_abort, &channel->io_memory_domain)) {
7104 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7105 0 : break;
7106 : }
7107 :
7108 23 : rc = bdev_abort_io(desc, channel, bio_to_abort, bdev_abort_io_done, parent_io);
7109 23 : if (rc != 0) {
7110 1 : if (rc == -ENOMEM) {
7111 1 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_NOMEM;
7112 : } else {
7113 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7114 : }
7115 1 : break;
7116 : }
7117 22 : matched_ios++;
7118 : }
7119 :
7120 18 : return matched_ios;
7121 : }
7122 :
7123 : static void
7124 1 : bdev_abort_retry(void *ctx)
7125 : {
7126 1 : struct spdk_bdev_io *parent_io = ctx;
7127 : uint32_t matched_ios;
7128 :
7129 1 : matched_ios = _bdev_abort(parent_io);
7130 :
7131 1 : if (matched_ios == 0) {
7132 0 : if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7133 0 : bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
7134 : } else {
7135 : /* For retry, the case that no target I/O was found is success
7136 : * because it means target I/Os completed in the meantime.
7137 : */
7138 0 : bdev_io_complete(parent_io);
7139 : }
7140 0 : return;
7141 : }
7142 :
7143 : /* Use split_outstanding to manage the progress of aborting I/Os. */
7144 1 : parent_io->internal.f.split = true;
7145 1 : parent_io->internal.split.outstanding = matched_ios;
7146 : }
7147 :
7148 : static void
7149 17 : bdev_abort(struct spdk_bdev_io *parent_io)
7150 : {
7151 : uint32_t matched_ios;
7152 :
7153 17 : matched_ios = _bdev_abort(parent_io);
7154 :
7155 17 : if (matched_ios == 0) {
7156 2 : if (parent_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
7157 1 : bdev_queue_io_wait_with_cb(parent_io, bdev_abort_retry);
7158 : } else {
7159 : /* The case the no target I/O was found is failure. */
7160 1 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7161 1 : bdev_io_complete(parent_io);
7162 : }
7163 2 : return;
7164 : }
7165 :
7166 : /* Use split_outstanding to manage the progress of aborting I/Os. */
7167 15 : parent_io->internal.f.split = true;
7168 15 : parent_io->internal.split.outstanding = matched_ios;
7169 : }
7170 :
7171 : int
7172 12 : spdk_bdev_abort(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
7173 : void *bio_cb_arg,
7174 : spdk_bdev_io_completion_cb cb, void *cb_arg)
7175 : {
7176 12 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
7177 12 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7178 : struct spdk_bdev_io *bdev_io;
7179 :
7180 12 : if (bio_cb_arg == NULL) {
7181 0 : return -EINVAL;
7182 : }
7183 :
7184 12 : if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ABORT)) {
7185 1 : return -ENOTSUP;
7186 : }
7187 :
7188 11 : bdev_io = bdev_channel_get_io(channel);
7189 11 : if (bdev_io == NULL) {
7190 0 : return -ENOMEM;
7191 : }
7192 :
7193 11 : bdev_io->internal.ch = channel;
7194 11 : bdev_io->internal.desc = desc;
7195 11 : bdev_io->internal.submit_tsc = spdk_get_ticks();
7196 11 : bdev_io->type = SPDK_BDEV_IO_TYPE_ABORT;
7197 11 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
7198 :
7199 11 : bdev_io->u.bdev.abort.bio_cb_arg = bio_cb_arg;
7200 :
7201 : /* Parent abort request is not submitted directly, but to manage its execution,
7202 : * add it to the submitted list here.
7203 : */
7204 11 : bdev_ch_add_to_io_submitted(bdev_io);
7205 :
7206 11 : bdev_abort(bdev_io);
7207 :
7208 11 : return 0;
7209 : }
7210 :
7211 : int
7212 4 : spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
7213 : struct spdk_bdev_io_wait_entry *entry)
7214 : {
7215 4 : struct spdk_bdev_channel *channel = __io_ch_to_bdev_ch(ch);
7216 4 : struct spdk_bdev_mgmt_channel *mgmt_ch = channel->shared_resource->mgmt_ch;
7217 :
7218 4 : if (bdev != entry->bdev) {
7219 0 : SPDK_ERRLOG("bdevs do not match\n");
7220 0 : return -EINVAL;
7221 : }
7222 :
7223 4 : if (mgmt_ch->per_thread_cache_count > 0) {
7224 0 : SPDK_ERRLOG("Cannot queue io_wait if spdk_bdev_io available in per-thread cache\n");
7225 0 : return -EINVAL;
7226 : }
7227 :
7228 4 : TAILQ_INSERT_TAIL(&mgmt_ch->io_wait_queue, entry, link);
7229 4 : return 0;
7230 : }
7231 :
7232 : static inline void
7233 612 : bdev_io_update_io_stat(struct spdk_bdev_io *bdev_io, uint64_t tsc_diff)
7234 : {
7235 612 : enum spdk_bdev_io_status io_status = bdev_io->internal.status;
7236 612 : struct spdk_bdev_io_stat *io_stat = bdev_io->internal.ch->stat;
7237 612 : uint64_t num_blocks = bdev_io->u.bdev.num_blocks;
7238 612 : uint32_t blocklen = bdev_io->bdev->blocklen;
7239 :
7240 612 : if (spdk_likely(io_status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7241 519 : switch (bdev_io->type) {
7242 321 : case SPDK_BDEV_IO_TYPE_READ:
7243 321 : io_stat->bytes_read += num_blocks * blocklen;
7244 321 : io_stat->num_read_ops++;
7245 321 : io_stat->read_latency_ticks += tsc_diff;
7246 321 : if (io_stat->max_read_latency_ticks < tsc_diff) {
7247 7 : io_stat->max_read_latency_ticks = tsc_diff;
7248 : }
7249 321 : if (io_stat->min_read_latency_ticks > tsc_diff) {
7250 42 : io_stat->min_read_latency_ticks = tsc_diff;
7251 : }
7252 321 : break;
7253 75 : case SPDK_BDEV_IO_TYPE_WRITE:
7254 75 : io_stat->bytes_written += num_blocks * blocklen;
7255 75 : io_stat->num_write_ops++;
7256 75 : io_stat->write_latency_ticks += tsc_diff;
7257 75 : if (io_stat->max_write_latency_ticks < tsc_diff) {
7258 4 : io_stat->max_write_latency_ticks = tsc_diff;
7259 : }
7260 75 : if (io_stat->min_write_latency_ticks > tsc_diff) {
7261 25 : io_stat->min_write_latency_ticks = tsc_diff;
7262 : }
7263 75 : break;
7264 20 : case SPDK_BDEV_IO_TYPE_UNMAP:
7265 20 : io_stat->bytes_unmapped += num_blocks * blocklen;
7266 20 : io_stat->num_unmap_ops++;
7267 20 : io_stat->unmap_latency_ticks += tsc_diff;
7268 20 : if (io_stat->max_unmap_latency_ticks < tsc_diff) {
7269 0 : io_stat->max_unmap_latency_ticks = tsc_diff;
7270 : }
7271 20 : if (io_stat->min_unmap_latency_ticks > tsc_diff) {
7272 3 : io_stat->min_unmap_latency_ticks = tsc_diff;
7273 : }
7274 20 : break;
7275 4 : case SPDK_BDEV_IO_TYPE_ZCOPY:
7276 : /* Track the data in the start phase only */
7277 4 : if (bdev_io->u.bdev.zcopy.start) {
7278 2 : if (bdev_io->u.bdev.zcopy.populate) {
7279 1 : io_stat->bytes_read += num_blocks * blocklen;
7280 1 : io_stat->num_read_ops++;
7281 1 : io_stat->read_latency_ticks += tsc_diff;
7282 1 : if (io_stat->max_read_latency_ticks < tsc_diff) {
7283 0 : io_stat->max_read_latency_ticks = tsc_diff;
7284 : }
7285 1 : if (io_stat->min_read_latency_ticks > tsc_diff) {
7286 1 : io_stat->min_read_latency_ticks = tsc_diff;
7287 : }
7288 : } else {
7289 1 : io_stat->bytes_written += num_blocks * blocklen;
7290 1 : io_stat->num_write_ops++;
7291 1 : io_stat->write_latency_ticks += tsc_diff;
7292 1 : if (io_stat->max_write_latency_ticks < tsc_diff) {
7293 0 : io_stat->max_write_latency_ticks = tsc_diff;
7294 : }
7295 1 : if (io_stat->min_write_latency_ticks > tsc_diff) {
7296 1 : io_stat->min_write_latency_ticks = tsc_diff;
7297 : }
7298 : }
7299 : }
7300 4 : break;
7301 21 : case SPDK_BDEV_IO_TYPE_COPY:
7302 21 : io_stat->bytes_copied += num_blocks * blocklen;
7303 21 : io_stat->num_copy_ops++;
7304 21 : bdev_io->internal.ch->stat->copy_latency_ticks += tsc_diff;
7305 21 : if (io_stat->max_copy_latency_ticks < tsc_diff) {
7306 0 : io_stat->max_copy_latency_ticks = tsc_diff;
7307 : }
7308 21 : if (io_stat->min_copy_latency_ticks > tsc_diff) {
7309 4 : io_stat->min_copy_latency_ticks = tsc_diff;
7310 : }
7311 21 : break;
7312 78 : default:
7313 78 : break;
7314 : }
7315 93 : } else if (io_status <= SPDK_BDEV_IO_STATUS_FAILED && io_status >= SPDK_MIN_BDEV_IO_STATUS) {
7316 93 : io_stat = bdev_io->bdev->internal.stat;
7317 93 : assert(io_stat->io_error != NULL);
7318 :
7319 93 : spdk_spin_lock(&bdev_io->bdev->internal.spinlock);
7320 93 : io_stat->io_error->error_status[-io_status - 1]++;
7321 93 : spdk_spin_unlock(&bdev_io->bdev->internal.spinlock);
7322 : }
7323 :
7324 : #ifdef SPDK_CONFIG_VTUNE
7325 : uint64_t now_tsc = spdk_get_ticks();
7326 : if (now_tsc > (bdev_io->internal.ch->start_tsc + bdev_io->internal.ch->interval_tsc)) {
7327 : uint64_t data[5];
7328 : struct spdk_bdev_io_stat *prev_stat = bdev_io->internal.ch->prev_stat;
7329 :
7330 : data[0] = io_stat->num_read_ops - prev_stat->num_read_ops;
7331 : data[1] = io_stat->bytes_read - prev_stat->bytes_read;
7332 : data[2] = io_stat->num_write_ops - prev_stat->num_write_ops;
7333 : data[3] = io_stat->bytes_written - prev_stat->bytes_written;
7334 : data[4] = bdev_io->bdev->fn_table->get_spin_time ?
7335 : bdev_io->bdev->fn_table->get_spin_time(spdk_bdev_io_get_io_channel(bdev_io)) : 0;
7336 :
7337 : __itt_metadata_add(g_bdev_mgr.domain, __itt_null, bdev_io->internal.ch->handle,
7338 : __itt_metadata_u64, 5, data);
7339 :
7340 : memcpy(prev_stat, io_stat, sizeof(struct spdk_bdev_io_stat));
7341 : bdev_io->internal.ch->start_tsc = now_tsc;
7342 : }
7343 : #endif
7344 612 : }
7345 :
7346 : static inline void
7347 612 : _bdev_io_complete(void *ctx)
7348 : {
7349 612 : struct spdk_bdev_io *bdev_io = ctx;
7350 :
7351 612 : if (spdk_unlikely(bdev_io_use_accel_sequence(bdev_io))) {
7352 0 : assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
7353 0 : spdk_accel_sequence_abort(bdev_io->internal.accel_sequence);
7354 : }
7355 :
7356 612 : assert(bdev_io->internal.cb != NULL);
7357 612 : assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
7358 :
7359 612 : bdev_io->internal.cb(bdev_io, bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
7360 : bdev_io->internal.caller_ctx);
7361 612 : }
7362 :
7363 : static inline void
7364 620 : bdev_io_complete(void *ctx)
7365 : {
7366 620 : struct spdk_bdev_io *bdev_io = ctx;
7367 620 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
7368 : uint64_t tsc, tsc_diff;
7369 :
7370 620 : if (spdk_unlikely(bdev_io->internal.f.in_submit_request)) {
7371 : /*
7372 : * Defer completion to avoid potential infinite recursion if the
7373 : * user's completion callback issues a new I/O.
7374 : */
7375 8 : spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7376 : bdev_io_complete, bdev_io);
7377 8 : return;
7378 : }
7379 :
7380 612 : tsc = spdk_get_ticks();
7381 612 : tsc_diff = tsc - bdev_io->internal.submit_tsc;
7382 :
7383 612 : bdev_ch_remove_from_io_submitted(bdev_io);
7384 612 : spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, bdev_ch->trace_id, 0, (uintptr_t)bdev_io,
7385 : bdev_io->internal.caller_ctx, bdev_ch->queue_depth);
7386 :
7387 612 : if (bdev_ch->histogram) {
7388 4 : if (bdev_io->bdev->internal.histogram_io_type == 0 ||
7389 0 : bdev_io->bdev->internal.histogram_io_type == bdev_io->type) {
7390 : /*
7391 : * Tally all I/O types if the histogram_io_type is set to 0.
7392 : */
7393 4 : spdk_histogram_data_tally(bdev_ch->histogram, tsc_diff);
7394 : }
7395 : }
7396 :
7397 612 : bdev_io_update_io_stat(bdev_io, tsc_diff);
7398 612 : _bdev_io_complete(bdev_io);
7399 : }
7400 :
7401 : /* The difference between this function and bdev_io_complete() is that this should be called to
7402 : * complete IOs that haven't been submitted via bdev_io_submit(), as they weren't added onto the
7403 : * io_submitted list and don't have submit_tsc updated.
7404 : */
7405 : static inline void
7406 0 : bdev_io_complete_unsubmitted(struct spdk_bdev_io *bdev_io)
7407 : {
7408 : /* Since the IO hasn't been submitted it's bound to be failed */
7409 0 : assert(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_SUCCESS);
7410 :
7411 : /* At this point we don't know if the IO is completed from submission context or not, but,
7412 : * since this is an error path, we can always do an spdk_thread_send_msg(). */
7413 0 : spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
7414 : _bdev_io_complete, bdev_io);
7415 0 : }
7416 :
7417 : static void bdev_destroy_cb(void *io_device);
7418 :
7419 : static inline void
7420 18 : _bdev_reset_complete(void *ctx)
7421 : {
7422 18 : struct spdk_bdev_io *bdev_io = ctx;
7423 :
7424 : /* Put the channel reference we got in submission. */
7425 18 : assert(bdev_io->u.reset.ch_ref != NULL);
7426 18 : spdk_put_io_channel(bdev_io->u.reset.ch_ref);
7427 18 : bdev_io->u.reset.ch_ref = NULL;
7428 :
7429 18 : bdev_io_complete(bdev_io);
7430 18 : }
7431 :
7432 : static void
7433 16 : bdev_reset_complete(struct spdk_bdev *bdev, void *_ctx, int status)
7434 : {
7435 16 : struct spdk_bdev_io *bdev_io = _ctx;
7436 16 : bdev_io_tailq_t queued_resets;
7437 : struct spdk_bdev_io *queued_reset;
7438 :
7439 16 : assert(bdev_io == bdev->internal.reset_in_progress);
7440 :
7441 16 : TAILQ_INIT(&queued_resets);
7442 :
7443 16 : spdk_spin_lock(&bdev->internal.spinlock);
7444 16 : TAILQ_SWAP(&bdev->internal.queued_resets, &queued_resets,
7445 : spdk_bdev_io, internal.link);
7446 16 : bdev->internal.reset_in_progress = NULL;
7447 16 : spdk_spin_unlock(&bdev->internal.spinlock);
7448 :
7449 18 : while (!TAILQ_EMPTY(&queued_resets)) {
7450 2 : queued_reset = TAILQ_FIRST(&queued_resets);
7451 2 : TAILQ_REMOVE(&queued_resets, queued_reset, internal.link);
7452 2 : queued_reset->internal.status = bdev_io->internal.status;
7453 2 : spdk_thread_send_msg(spdk_bdev_io_get_thread(queued_reset),
7454 : _bdev_reset_complete, queued_reset);
7455 : }
7456 :
7457 16 : _bdev_reset_complete(bdev_io);
7458 :
7459 16 : if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING &&
7460 1 : TAILQ_EMPTY(&bdev->internal.open_descs)) {
7461 1 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
7462 : }
7463 16 : }
7464 :
7465 : static void
7466 20 : bdev_unfreeze_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
7467 : struct spdk_io_channel *_ch, void *_ctx)
7468 : {
7469 20 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
7470 :
7471 20 : ch->flags &= ~BDEV_CH_RESET_IN_PROGRESS;
7472 :
7473 20 : spdk_bdev_for_each_channel_continue(i, 0);
7474 20 : }
7475 :
7476 : static void
7477 0 : bdev_io_complete_sequence_cb(void *ctx, int status)
7478 : {
7479 0 : struct spdk_bdev_io *bdev_io = ctx;
7480 :
7481 : /* u.bdev.accel_sequence should have already been cleared at this point */
7482 0 : assert(bdev_io->u.bdev.accel_sequence == NULL);
7483 0 : assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
7484 0 : bdev_io->internal.f.has_accel_sequence = false;
7485 :
7486 0 : if (spdk_unlikely(status != 0)) {
7487 0 : SPDK_ERRLOG("Failed to execute accel sequence, status=%d\n", status);
7488 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
7489 : }
7490 :
7491 0 : bdev_io_complete(bdev_io);
7492 0 : }
7493 :
7494 : void
7495 598 : spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
7496 : {
7497 598 : struct spdk_bdev *bdev = bdev_io->bdev;
7498 598 : struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
7499 598 : struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
7500 :
7501 598 : if (spdk_unlikely(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING)) {
7502 0 : SPDK_ERRLOG("Unexpected completion on IO from %s module, status was %s\n",
7503 : spdk_bdev_get_module_name(bdev),
7504 : bdev_io_status_get_string(bdev_io->internal.status));
7505 0 : assert(false);
7506 : }
7507 598 : bdev_io->internal.status = status;
7508 :
7509 598 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) {
7510 16 : assert(bdev_io == bdev->internal.reset_in_progress);
7511 16 : spdk_bdev_for_each_channel(bdev, bdev_unfreeze_channel, bdev_io,
7512 : bdev_reset_complete);
7513 16 : return;
7514 : } else {
7515 582 : bdev_io_decrement_outstanding(bdev_ch, shared_resource);
7516 582 : if (spdk_likely(status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7517 485 : if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
7518 0 : bdev_io_exec_sequence(bdev_io, bdev_io_complete_sequence_cb);
7519 0 : return;
7520 485 : } else if (spdk_unlikely(bdev_io->internal.f.has_bounce_buf &&
7521 : !bdev_io_use_accel_sequence(bdev_io))) {
7522 26 : _bdev_io_push_bounce_data_buffer(bdev_io,
7523 : _bdev_io_complete_push_bounce_done);
7524 : /* bdev IO will be completed in the callback */
7525 26 : return;
7526 : }
7527 : }
7528 :
7529 556 : if (spdk_unlikely(_bdev_io_handle_no_mem(bdev_io, BDEV_IO_RETRY_STATE_SUBMIT))) {
7530 5 : return;
7531 : }
7532 : }
7533 :
7534 551 : bdev_io_complete(bdev_io);
7535 : }
7536 :
7537 : void
7538 0 : spdk_bdev_io_complete_scsi_status(struct spdk_bdev_io *bdev_io, enum spdk_scsi_status sc,
7539 : enum spdk_scsi_sense sk, uint8_t asc, uint8_t ascq)
7540 : {
7541 : enum spdk_bdev_io_status status;
7542 :
7543 0 : if (sc == SPDK_SCSI_STATUS_GOOD) {
7544 0 : status = SPDK_BDEV_IO_STATUS_SUCCESS;
7545 : } else {
7546 0 : status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
7547 0 : bdev_io->internal.error.scsi.sc = sc;
7548 0 : bdev_io->internal.error.scsi.sk = sk;
7549 0 : bdev_io->internal.error.scsi.asc = asc;
7550 0 : bdev_io->internal.error.scsi.ascq = ascq;
7551 : }
7552 :
7553 0 : spdk_bdev_io_complete(bdev_io, status);
7554 0 : }
7555 :
7556 : void
7557 0 : spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
7558 : int *sc, int *sk, int *asc, int *ascq)
7559 : {
7560 0 : assert(sc != NULL);
7561 0 : assert(sk != NULL);
7562 0 : assert(asc != NULL);
7563 0 : assert(ascq != NULL);
7564 :
7565 0 : switch (bdev_io->internal.status) {
7566 0 : case SPDK_BDEV_IO_STATUS_SUCCESS:
7567 0 : *sc = SPDK_SCSI_STATUS_GOOD;
7568 0 : *sk = SPDK_SCSI_SENSE_NO_SENSE;
7569 0 : *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
7570 0 : *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
7571 0 : break;
7572 0 : case SPDK_BDEV_IO_STATUS_NVME_ERROR:
7573 0 : spdk_scsi_nvme_translate(bdev_io, sc, sk, asc, ascq);
7574 0 : break;
7575 0 : case SPDK_BDEV_IO_STATUS_MISCOMPARE:
7576 0 : *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
7577 0 : *sk = SPDK_SCSI_SENSE_MISCOMPARE;
7578 0 : *asc = SPDK_SCSI_ASC_MISCOMPARE_DURING_VERIFY_OPERATION;
7579 0 : *ascq = bdev_io->internal.error.scsi.ascq;
7580 0 : break;
7581 0 : case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
7582 0 : *sc = bdev_io->internal.error.scsi.sc;
7583 0 : *sk = bdev_io->internal.error.scsi.sk;
7584 0 : *asc = bdev_io->internal.error.scsi.asc;
7585 0 : *ascq = bdev_io->internal.error.scsi.ascq;
7586 0 : break;
7587 0 : default:
7588 0 : *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
7589 0 : *sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
7590 0 : *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
7591 0 : *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
7592 0 : break;
7593 : }
7594 0 : }
7595 :
7596 : void
7597 0 : spdk_bdev_io_complete_aio_status(struct spdk_bdev_io *bdev_io, int aio_result)
7598 : {
7599 : enum spdk_bdev_io_status status;
7600 :
7601 0 : if (aio_result == 0) {
7602 0 : status = SPDK_BDEV_IO_STATUS_SUCCESS;
7603 : } else {
7604 0 : status = SPDK_BDEV_IO_STATUS_AIO_ERROR;
7605 : }
7606 :
7607 0 : bdev_io->internal.error.aio_result = aio_result;
7608 :
7609 0 : spdk_bdev_io_complete(bdev_io, status);
7610 0 : }
7611 :
7612 : void
7613 0 : spdk_bdev_io_get_aio_status(const struct spdk_bdev_io *bdev_io, int *aio_result)
7614 : {
7615 0 : assert(aio_result != NULL);
7616 :
7617 0 : if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_AIO_ERROR) {
7618 0 : *aio_result = bdev_io->internal.error.aio_result;
7619 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7620 0 : *aio_result = 0;
7621 : } else {
7622 0 : *aio_result = -EIO;
7623 : }
7624 0 : }
7625 :
7626 : void
7627 0 : spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
7628 : {
7629 : enum spdk_bdev_io_status status;
7630 :
7631 0 : if (spdk_likely(sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS)) {
7632 0 : status = SPDK_BDEV_IO_STATUS_SUCCESS;
7633 0 : } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
7634 0 : status = SPDK_BDEV_IO_STATUS_ABORTED;
7635 : } else {
7636 0 : status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
7637 : }
7638 :
7639 0 : bdev_io->internal.error.nvme.cdw0 = cdw0;
7640 0 : bdev_io->internal.error.nvme.sct = sct;
7641 0 : bdev_io->internal.error.nvme.sc = sc;
7642 :
7643 0 : spdk_bdev_io_complete(bdev_io, status);
7644 0 : }
7645 :
7646 : void
7647 0 : spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc)
7648 : {
7649 0 : assert(sct != NULL);
7650 0 : assert(sc != NULL);
7651 0 : assert(cdw0 != NULL);
7652 :
7653 0 : if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
7654 0 : *sct = SPDK_NVME_SCT_GENERIC;
7655 0 : *sc = SPDK_NVME_SC_SUCCESS;
7656 0 : if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7657 0 : *cdw0 = 0;
7658 : } else {
7659 0 : *cdw0 = 1U;
7660 : }
7661 0 : return;
7662 : }
7663 :
7664 0 : if (spdk_likely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
7665 0 : *sct = SPDK_NVME_SCT_GENERIC;
7666 0 : *sc = SPDK_NVME_SC_SUCCESS;
7667 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
7668 0 : *sct = bdev_io->internal.error.nvme.sct;
7669 0 : *sc = bdev_io->internal.error.nvme.sc;
7670 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
7671 0 : *sct = SPDK_NVME_SCT_GENERIC;
7672 0 : *sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7673 : } else {
7674 0 : *sct = SPDK_NVME_SCT_GENERIC;
7675 0 : *sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7676 : }
7677 :
7678 0 : *cdw0 = bdev_io->internal.error.nvme.cdw0;
7679 : }
7680 :
7681 : void
7682 0 : spdk_bdev_io_get_nvme_fused_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0,
7683 : int *first_sct, int *first_sc, int *second_sct, int *second_sc)
7684 : {
7685 0 : assert(first_sct != NULL);
7686 0 : assert(first_sc != NULL);
7687 0 : assert(second_sct != NULL);
7688 0 : assert(second_sc != NULL);
7689 0 : assert(cdw0 != NULL);
7690 :
7691 0 : if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
7692 0 : if (bdev_io->internal.error.nvme.sct == SPDK_NVME_SCT_MEDIA_ERROR &&
7693 0 : bdev_io->internal.error.nvme.sc == SPDK_NVME_SC_COMPARE_FAILURE) {
7694 0 : *first_sct = bdev_io->internal.error.nvme.sct;
7695 0 : *first_sc = bdev_io->internal.error.nvme.sc;
7696 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7697 0 : *second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7698 : } else {
7699 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7700 0 : *first_sc = SPDK_NVME_SC_SUCCESS;
7701 0 : *second_sct = bdev_io->internal.error.nvme.sct;
7702 0 : *second_sc = bdev_io->internal.error.nvme.sc;
7703 : }
7704 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED) {
7705 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7706 0 : *first_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7707 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7708 0 : *second_sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
7709 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
7710 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7711 0 : *first_sc = SPDK_NVME_SC_SUCCESS;
7712 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7713 0 : *second_sc = SPDK_NVME_SC_SUCCESS;
7714 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FIRST_FUSED_FAILED) {
7715 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7716 0 : *first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7717 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7718 0 : *second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7719 0 : } else if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_MISCOMPARE) {
7720 0 : *first_sct = SPDK_NVME_SCT_MEDIA_ERROR;
7721 0 : *first_sc = SPDK_NVME_SC_COMPARE_FAILURE;
7722 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7723 0 : *second_sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
7724 : } else {
7725 0 : *first_sct = SPDK_NVME_SCT_GENERIC;
7726 0 : *first_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7727 0 : *second_sct = SPDK_NVME_SCT_GENERIC;
7728 0 : *second_sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
7729 : }
7730 :
7731 0 : *cdw0 = bdev_io->internal.error.nvme.cdw0;
7732 0 : }
7733 :
7734 : void
7735 0 : spdk_bdev_io_complete_base_io_status(struct spdk_bdev_io *bdev_io,
7736 : const struct spdk_bdev_io *base_io)
7737 : {
7738 0 : switch (base_io->internal.status) {
7739 0 : case SPDK_BDEV_IO_STATUS_NVME_ERROR:
7740 0 : spdk_bdev_io_complete_nvme_status(bdev_io,
7741 0 : base_io->internal.error.nvme.cdw0,
7742 0 : base_io->internal.error.nvme.sct,
7743 0 : base_io->internal.error.nvme.sc);
7744 0 : break;
7745 0 : case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
7746 0 : spdk_bdev_io_complete_scsi_status(bdev_io,
7747 0 : base_io->internal.error.scsi.sc,
7748 0 : base_io->internal.error.scsi.sk,
7749 0 : base_io->internal.error.scsi.asc,
7750 0 : base_io->internal.error.scsi.ascq);
7751 0 : break;
7752 0 : case SPDK_BDEV_IO_STATUS_AIO_ERROR:
7753 0 : spdk_bdev_io_complete_aio_status(bdev_io, base_io->internal.error.aio_result);
7754 0 : break;
7755 0 : default:
7756 0 : spdk_bdev_io_complete(bdev_io, base_io->internal.status);
7757 0 : break;
7758 : }
7759 0 : }
7760 :
7761 : struct spdk_thread *
7762 664 : spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
7763 : {
7764 664 : return spdk_io_channel_get_thread(bdev_io->internal.ch->channel);
7765 : }
7766 :
7767 : struct spdk_io_channel *
7768 70 : spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
7769 : {
7770 70 : return bdev_io->internal.ch->channel;
7771 : }
7772 :
7773 : static int
7774 130 : bdev_register(struct spdk_bdev *bdev)
7775 : {
7776 : char *bdev_name;
7777 130 : char uuid[SPDK_UUID_STRING_LEN];
7778 130 : struct spdk_iobuf_opts iobuf_opts;
7779 : int ret;
7780 :
7781 130 : assert(bdev->module != NULL);
7782 :
7783 130 : if (!bdev->name) {
7784 0 : SPDK_ERRLOG("Bdev name is NULL\n");
7785 0 : return -EINVAL;
7786 : }
7787 :
7788 130 : if (!strlen(bdev->name)) {
7789 0 : SPDK_ERRLOG("Bdev name must not be an empty string\n");
7790 0 : return -EINVAL;
7791 : }
7792 :
7793 : /* Users often register their own I/O devices using the bdev name. In
7794 : * order to avoid conflicts, prepend bdev_. */
7795 130 : bdev_name = spdk_sprintf_alloc("bdev_%s", bdev->name);
7796 130 : if (!bdev_name) {
7797 0 : SPDK_ERRLOG("Unable to allocate memory for internal bdev name.\n");
7798 0 : return -ENOMEM;
7799 : }
7800 :
7801 130 : bdev->internal.stat = bdev_alloc_io_stat(true);
7802 130 : if (!bdev->internal.stat) {
7803 0 : SPDK_ERRLOG("Unable to allocate I/O statistics structure.\n");
7804 0 : free(bdev_name);
7805 0 : return -ENOMEM;
7806 : }
7807 :
7808 130 : bdev->internal.status = SPDK_BDEV_STATUS_READY;
7809 130 : bdev->internal.measured_queue_depth = UINT64_MAX;
7810 130 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
7811 130 : memset(&bdev->internal.claim, 0, sizeof(bdev->internal.claim));
7812 130 : bdev->internal.qd_poller = NULL;
7813 130 : bdev->internal.qos = NULL;
7814 :
7815 130 : TAILQ_INIT(&bdev->internal.open_descs);
7816 130 : TAILQ_INIT(&bdev->internal.locked_ranges);
7817 130 : TAILQ_INIT(&bdev->internal.pending_locked_ranges);
7818 130 : TAILQ_INIT(&bdev->internal.queued_resets);
7819 130 : TAILQ_INIT(&bdev->aliases);
7820 :
7821 : /* UUID may be specified by the user or defined by bdev itself.
7822 : * Otherwise it will be generated here, so this field will never be empty. */
7823 130 : if (spdk_uuid_is_null(&bdev->uuid)) {
7824 43 : spdk_uuid_generate(&bdev->uuid);
7825 : }
7826 :
7827 : /* Add the UUID alias only if it's different than the name */
7828 130 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
7829 130 : if (strcmp(bdev->name, uuid) != 0) {
7830 129 : ret = spdk_bdev_alias_add(bdev, uuid);
7831 129 : if (ret != 0) {
7832 2 : SPDK_ERRLOG("Unable to add uuid:%s alias for bdev %s\n", uuid, bdev->name);
7833 2 : bdev_free_io_stat(bdev->internal.stat);
7834 2 : free(bdev_name);
7835 2 : return ret;
7836 : }
7837 : }
7838 :
7839 128 : spdk_iobuf_get_opts(&iobuf_opts, sizeof(iobuf_opts));
7840 128 : if (spdk_bdev_get_buf_align(bdev) > 1) {
7841 0 : bdev->max_rw_size = spdk_min(bdev->max_rw_size ? bdev->max_rw_size : UINT32_MAX,
7842 : iobuf_opts.large_bufsize / bdev->blocklen);
7843 : }
7844 :
7845 : /* If the user didn't specify a write unit size, set it to one. */
7846 128 : if (bdev->write_unit_size == 0) {
7847 124 : bdev->write_unit_size = 1;
7848 : }
7849 :
7850 : /* Set ACWU value to the write unit size if bdev module did not set it (does not support it natively) */
7851 128 : if (bdev->acwu == 0) {
7852 124 : bdev->acwu = bdev->write_unit_size;
7853 : }
7854 :
7855 128 : if (bdev->phys_blocklen == 0) {
7856 124 : bdev->phys_blocklen = spdk_bdev_get_data_block_size(bdev);
7857 : }
7858 :
7859 128 : if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
7860 0 : bdev->max_copy = bdev_get_max_write(bdev, iobuf_opts.large_bufsize);
7861 : }
7862 :
7863 128 : if (!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
7864 0 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
7865 : }
7866 :
7867 128 : bdev->internal.reset_in_progress = NULL;
7868 128 : bdev->internal.qd_poll_in_progress = false;
7869 128 : bdev->internal.period = 0;
7870 128 : bdev->internal.new_period = 0;
7871 128 : bdev->internal.trace_id = spdk_trace_register_owner(OWNER_TYPE_BDEV, bdev_name);
7872 :
7873 : /*
7874 : * Initialize spinlock before registering IO device because spinlock is used in
7875 : * bdev_channel_create
7876 : */
7877 128 : spdk_spin_init(&bdev->internal.spinlock);
7878 :
7879 128 : spdk_io_device_register(__bdev_to_io_dev(bdev),
7880 : bdev_channel_create, bdev_channel_destroy,
7881 : sizeof(struct spdk_bdev_channel),
7882 : bdev_name);
7883 :
7884 : /*
7885 : * Register bdev name only after the bdev object is ready.
7886 : * After bdev_name_add returns, it is possible for other threads to start using the bdev,
7887 : * create IO channels...
7888 : */
7889 128 : ret = bdev_name_add(&bdev->internal.bdev_name, bdev, bdev->name);
7890 128 : if (ret != 0) {
7891 0 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), NULL);
7892 0 : bdev_free_io_stat(bdev->internal.stat);
7893 0 : spdk_spin_destroy(&bdev->internal.spinlock);
7894 0 : free(bdev_name);
7895 0 : return ret;
7896 : }
7897 :
7898 128 : free(bdev_name);
7899 :
7900 128 : SPDK_DEBUGLOG(bdev, "Inserting bdev %s into list\n", bdev->name);
7901 128 : TAILQ_INSERT_TAIL(&g_bdev_mgr.bdevs, bdev, internal.link);
7902 :
7903 128 : return 0;
7904 : }
7905 :
7906 : static void
7907 129 : bdev_destroy_cb(void *io_device)
7908 : {
7909 : int rc;
7910 : struct spdk_bdev *bdev;
7911 : spdk_bdev_unregister_cb cb_fn;
7912 : void *cb_arg;
7913 :
7914 129 : bdev = __bdev_from_io_dev(io_device);
7915 :
7916 129 : if (bdev->internal.unregister_td != spdk_get_thread()) {
7917 1 : spdk_thread_send_msg(bdev->internal.unregister_td, bdev_destroy_cb, io_device);
7918 1 : return;
7919 : }
7920 :
7921 128 : cb_fn = bdev->internal.unregister_cb;
7922 128 : cb_arg = bdev->internal.unregister_ctx;
7923 :
7924 128 : spdk_spin_destroy(&bdev->internal.spinlock);
7925 128 : free(bdev->internal.qos);
7926 128 : bdev_free_io_stat(bdev->internal.stat);
7927 128 : spdk_trace_unregister_owner(bdev->internal.trace_id);
7928 :
7929 128 : rc = bdev->fn_table->destruct(bdev->ctxt);
7930 128 : if (rc < 0) {
7931 0 : SPDK_ERRLOG("destruct failed\n");
7932 : }
7933 128 : if (rc <= 0 && cb_fn != NULL) {
7934 10 : cb_fn(cb_arg, rc);
7935 : }
7936 : }
7937 :
7938 : void
7939 2 : spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
7940 : {
7941 2 : if (bdev->internal.unregister_cb != NULL) {
7942 0 : bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
7943 : }
7944 2 : }
7945 :
7946 : static void
7947 19 : _remove_notify(void *arg)
7948 : {
7949 19 : struct spdk_bdev_desc *desc = arg;
7950 :
7951 19 : _event_notify(desc, SPDK_BDEV_EVENT_REMOVE);
7952 19 : }
7953 :
7954 : /* returns: 0 - bdev removed and ready to be destructed.
7955 : * -EBUSY - bdev can't be destructed yet. */
7956 : static int
7957 143 : bdev_unregister_unsafe(struct spdk_bdev *bdev)
7958 : {
7959 : struct spdk_bdev_desc *desc, *tmp;
7960 : struct spdk_bdev_alias *alias;
7961 143 : int rc = 0;
7962 143 : char uuid[SPDK_UUID_STRING_LEN];
7963 :
7964 143 : assert(spdk_spin_held(&g_bdev_mgr.spinlock));
7965 143 : assert(spdk_spin_held(&bdev->internal.spinlock));
7966 :
7967 : /* Notify each descriptor about hotremoval */
7968 162 : TAILQ_FOREACH_SAFE(desc, &bdev->internal.open_descs, link, tmp) {
7969 19 : rc = -EBUSY;
7970 : /*
7971 : * Defer invocation of the event_cb to a separate message that will
7972 : * run later on its thread. This ensures this context unwinds and
7973 : * we don't recursively unregister this bdev again if the event_cb
7974 : * immediately closes its descriptor.
7975 : */
7976 19 : event_notify(desc, _remove_notify);
7977 : }
7978 :
7979 : /* If there are no descriptors, proceed removing the bdev */
7980 143 : if (rc == 0) {
7981 128 : bdev_examine_allowlist_remove(bdev->name);
7982 254 : TAILQ_FOREACH(alias, &bdev->aliases, tailq) {
7983 126 : bdev_examine_allowlist_remove(alias->alias.name);
7984 : }
7985 128 : TAILQ_REMOVE(&g_bdev_mgr.bdevs, bdev, internal.link);
7986 128 : SPDK_DEBUGLOG(bdev, "Removing bdev %s from list done\n", bdev->name);
7987 :
7988 : /* Delete the name and the UUID alias */
7989 128 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
7990 128 : bdev_name_del_unsafe(&bdev->internal.bdev_name);
7991 128 : bdev_alias_del(bdev, uuid, bdev_name_del_unsafe);
7992 :
7993 128 : spdk_notify_send("bdev_unregister", spdk_bdev_get_name(bdev));
7994 :
7995 128 : if (bdev->internal.reset_in_progress != NULL) {
7996 : /* If reset is in progress, let the completion callback for reset
7997 : * unregister the bdev.
7998 : */
7999 1 : rc = -EBUSY;
8000 : }
8001 : }
8002 :
8003 143 : return rc;
8004 : }
8005 :
8006 : static void
8007 4 : bdev_unregister_abort_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
8008 : struct spdk_io_channel *io_ch, void *_ctx)
8009 : {
8010 4 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
8011 :
8012 4 : bdev_channel_abort_queued_ios(bdev_ch);
8013 4 : spdk_bdev_for_each_channel_continue(i, 0);
8014 4 : }
8015 :
8016 : static void
8017 128 : bdev_unregister(struct spdk_bdev *bdev, void *_ctx, int status)
8018 : {
8019 : int rc;
8020 :
8021 128 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8022 128 : spdk_spin_lock(&bdev->internal.spinlock);
8023 : /*
8024 : * Set the status to REMOVING after completing to abort channels. Otherwise,
8025 : * the last spdk_bdev_close() may call spdk_io_device_unregister() while
8026 : * spdk_bdev_for_each_channel() is executed and spdk_io_device_unregister()
8027 : * may fail.
8028 : */
8029 128 : bdev->internal.status = SPDK_BDEV_STATUS_REMOVING;
8030 128 : rc = bdev_unregister_unsafe(bdev);
8031 128 : spdk_spin_unlock(&bdev->internal.spinlock);
8032 128 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8033 :
8034 128 : if (rc == 0) {
8035 112 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
8036 : }
8037 128 : }
8038 :
8039 : void
8040 135 : spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
8041 : {
8042 : struct spdk_thread *thread;
8043 :
8044 135 : SPDK_DEBUGLOG(bdev, "Removing bdev %s from list\n", bdev->name);
8045 :
8046 135 : thread = spdk_get_thread();
8047 135 : if (!thread) {
8048 : /* The user called this from a non-SPDK thread. */
8049 0 : if (cb_fn != NULL) {
8050 0 : cb_fn(cb_arg, -ENOTSUP);
8051 : }
8052 0 : return;
8053 : }
8054 :
8055 135 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8056 135 : if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
8057 135 : bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
8058 7 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8059 7 : if (cb_fn) {
8060 0 : cb_fn(cb_arg, -EBUSY);
8061 : }
8062 7 : return;
8063 : }
8064 :
8065 128 : spdk_spin_lock(&bdev->internal.spinlock);
8066 128 : bdev->internal.status = SPDK_BDEV_STATUS_UNREGISTERING;
8067 128 : bdev->internal.unregister_cb = cb_fn;
8068 128 : bdev->internal.unregister_ctx = cb_arg;
8069 128 : bdev->internal.unregister_td = thread;
8070 128 : spdk_spin_unlock(&bdev->internal.spinlock);
8071 128 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8072 :
8073 128 : spdk_bdev_set_qd_sampling_period(bdev, 0);
8074 :
8075 128 : spdk_bdev_for_each_channel(bdev, bdev_unregister_abort_channel, bdev,
8076 : bdev_unregister);
8077 : }
8078 :
8079 : int
8080 4 : spdk_bdev_unregister_by_name(const char *bdev_name, struct spdk_bdev_module *module,
8081 : spdk_bdev_unregister_cb cb_fn, void *cb_arg)
8082 : {
8083 4 : struct spdk_bdev_desc *desc;
8084 : struct spdk_bdev *bdev;
8085 : int rc;
8086 :
8087 4 : rc = spdk_bdev_open_ext(bdev_name, false, _tmp_bdev_event_cb, NULL, &desc);
8088 4 : if (rc != 0) {
8089 1 : SPDK_ERRLOG("Failed to open bdev with name: %s\n", bdev_name);
8090 1 : return rc;
8091 : }
8092 :
8093 3 : bdev = spdk_bdev_desc_get_bdev(desc);
8094 :
8095 3 : if (bdev->module != module) {
8096 1 : spdk_bdev_close(desc);
8097 1 : SPDK_ERRLOG("Bdev %s was not registered by the specified module.\n",
8098 : bdev_name);
8099 1 : return -ENODEV;
8100 : }
8101 :
8102 2 : spdk_bdev_unregister(bdev, cb_fn, cb_arg);
8103 :
8104 2 : spdk_bdev_close(desc);
8105 :
8106 2 : return 0;
8107 : }
8108 :
8109 : static int
8110 265 : bdev_start_qos(struct spdk_bdev *bdev)
8111 : {
8112 : struct set_qos_limit_ctx *ctx;
8113 :
8114 : /* Enable QoS */
8115 265 : if (bdev->internal.qos && bdev->internal.qos->thread == NULL) {
8116 2 : ctx = calloc(1, sizeof(*ctx));
8117 2 : if (ctx == NULL) {
8118 0 : SPDK_ERRLOG("Failed to allocate memory for QoS context\n");
8119 0 : return -ENOMEM;
8120 : }
8121 2 : ctx->bdev = bdev;
8122 2 : spdk_bdev_for_each_channel(bdev, bdev_enable_qos_msg, ctx, bdev_enable_qos_done);
8123 : }
8124 :
8125 265 : return 0;
8126 : }
8127 :
8128 : static void
8129 25 : log_already_claimed(enum spdk_log_level level, const int line, const char *func, const char *detail,
8130 : struct spdk_bdev *bdev)
8131 : {
8132 : enum spdk_bdev_claim_type type;
8133 : const char *typename, *modname;
8134 : extern struct spdk_log_flag SPDK_LOG_bdev;
8135 :
8136 25 : assert(spdk_spin_held(&bdev->internal.spinlock));
8137 :
8138 25 : if (level >= SPDK_LOG_INFO && !SPDK_LOG_bdev.enabled) {
8139 0 : return;
8140 : }
8141 :
8142 25 : type = bdev->internal.claim_type;
8143 25 : typename = spdk_bdev_claim_get_name(type);
8144 :
8145 25 : if (type == SPDK_BDEV_CLAIM_EXCL_WRITE) {
8146 6 : modname = bdev->internal.claim.v1.module->name;
8147 6 : spdk_log(level, __FILE__, line, func, "bdev %s %s: type %s by module %s\n",
8148 : bdev->name, detail, typename, modname);
8149 6 : return;
8150 : }
8151 :
8152 19 : if (claim_type_is_v2(type)) {
8153 : struct spdk_bdev_module_claim *claim;
8154 :
8155 38 : TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) {
8156 19 : modname = claim->module->name;
8157 19 : spdk_log(level, __FILE__, line, func, "bdev %s %s: type %s by module %s\n",
8158 : bdev->name, detail, typename, modname);
8159 : }
8160 19 : return;
8161 : }
8162 :
8163 0 : assert(false);
8164 : }
8165 :
8166 : static int
8167 274 : bdev_open(struct spdk_bdev *bdev, bool write, struct spdk_bdev_desc *desc)
8168 : {
8169 : struct spdk_thread *thread;
8170 274 : int rc = 0;
8171 :
8172 274 : thread = spdk_get_thread();
8173 274 : if (!thread) {
8174 0 : SPDK_ERRLOG("Cannot open bdev from non-SPDK thread.\n");
8175 0 : return -ENOTSUP;
8176 : }
8177 :
8178 274 : SPDK_DEBUGLOG(bdev, "Opening descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
8179 : spdk_get_thread());
8180 :
8181 274 : desc->bdev = bdev;
8182 274 : desc->thread = thread;
8183 274 : desc->write = write;
8184 :
8185 274 : spdk_spin_lock(&bdev->internal.spinlock);
8186 274 : if (bdev->internal.status == SPDK_BDEV_STATUS_UNREGISTERING ||
8187 274 : bdev->internal.status == SPDK_BDEV_STATUS_REMOVING) {
8188 3 : spdk_spin_unlock(&bdev->internal.spinlock);
8189 3 : return -ENODEV;
8190 : }
8191 :
8192 271 : if (write && bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8193 6 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8194 6 : spdk_spin_unlock(&bdev->internal.spinlock);
8195 6 : return -EPERM;
8196 : }
8197 :
8198 265 : rc = bdev_start_qos(bdev);
8199 265 : if (rc != 0) {
8200 0 : SPDK_ERRLOG("Failed to start QoS on bdev %s\n", bdev->name);
8201 0 : spdk_spin_unlock(&bdev->internal.spinlock);
8202 0 : return rc;
8203 : }
8204 :
8205 265 : TAILQ_INSERT_TAIL(&bdev->internal.open_descs, desc, link);
8206 :
8207 265 : spdk_spin_unlock(&bdev->internal.spinlock);
8208 :
8209 265 : return 0;
8210 : }
8211 :
8212 : static int
8213 274 : bdev_desc_alloc(struct spdk_bdev *bdev, spdk_bdev_event_cb_t event_cb, void *event_ctx,
8214 : struct spdk_bdev_desc **_desc)
8215 : {
8216 : struct spdk_bdev_desc *desc;
8217 : unsigned int i;
8218 :
8219 274 : desc = calloc(1, sizeof(*desc));
8220 274 : if (desc == NULL) {
8221 0 : SPDK_ERRLOG("Failed to allocate memory for bdev descriptor\n");
8222 0 : return -ENOMEM;
8223 : }
8224 :
8225 274 : TAILQ_INIT(&desc->pending_media_events);
8226 274 : TAILQ_INIT(&desc->free_media_events);
8227 :
8228 274 : desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
8229 274 : desc->callback.event_fn = event_cb;
8230 274 : desc->callback.ctx = event_ctx;
8231 274 : spdk_spin_init(&desc->spinlock);
8232 :
8233 274 : if (bdev->media_events) {
8234 0 : desc->media_events_buffer = calloc(MEDIA_EVENT_POOL_SIZE,
8235 : sizeof(*desc->media_events_buffer));
8236 0 : if (desc->media_events_buffer == NULL) {
8237 0 : SPDK_ERRLOG("Failed to initialize media event pool\n");
8238 0 : bdev_desc_free(desc);
8239 0 : return -ENOMEM;
8240 : }
8241 :
8242 0 : for (i = 0; i < MEDIA_EVENT_POOL_SIZE; ++i) {
8243 0 : TAILQ_INSERT_TAIL(&desc->free_media_events,
8244 : &desc->media_events_buffer[i], tailq);
8245 : }
8246 : }
8247 :
8248 274 : if (bdev->fn_table->accel_sequence_supported != NULL) {
8249 0 : for (i = 0; i < SPDK_BDEV_NUM_IO_TYPES; ++i) {
8250 0 : desc->accel_sequence_supported[i] =
8251 0 : bdev->fn_table->accel_sequence_supported(bdev->ctxt,
8252 : (enum spdk_bdev_io_type)i);
8253 : }
8254 : }
8255 :
8256 274 : *_desc = desc;
8257 :
8258 274 : return 0;
8259 : }
8260 :
8261 : static int
8262 134 : bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8263 : void *event_ctx, struct spdk_bdev_desc **_desc)
8264 : {
8265 134 : struct spdk_bdev_desc *desc;
8266 : struct spdk_bdev *bdev;
8267 : int rc;
8268 :
8269 134 : bdev = bdev_get_by_name(bdev_name);
8270 :
8271 134 : if (bdev == NULL) {
8272 1 : SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", bdev_name);
8273 1 : return -ENODEV;
8274 : }
8275 :
8276 133 : rc = bdev_desc_alloc(bdev, event_cb, event_ctx, &desc);
8277 133 : if (rc != 0) {
8278 0 : return rc;
8279 : }
8280 :
8281 133 : rc = bdev_open(bdev, write, desc);
8282 133 : if (rc != 0) {
8283 7 : bdev_desc_free(desc);
8284 7 : desc = NULL;
8285 : }
8286 :
8287 133 : *_desc = desc;
8288 :
8289 133 : return rc;
8290 : }
8291 :
8292 : int
8293 136 : spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8294 : void *event_ctx, struct spdk_bdev_desc **_desc)
8295 : {
8296 : int rc;
8297 :
8298 136 : if (event_cb == NULL) {
8299 2 : SPDK_ERRLOG("Missing event callback function\n");
8300 2 : return -EINVAL;
8301 : }
8302 :
8303 134 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8304 134 : rc = bdev_open_ext(bdev_name, write, event_cb, event_ctx, _desc);
8305 134 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8306 :
8307 134 : return rc;
8308 : }
8309 :
8310 : struct spdk_bdev_open_async_ctx {
8311 : char *bdev_name;
8312 : spdk_bdev_event_cb_t event_cb;
8313 : void *event_ctx;
8314 : bool write;
8315 : int rc;
8316 : spdk_bdev_open_async_cb_t cb_fn;
8317 : void *cb_arg;
8318 : struct spdk_bdev_desc *desc;
8319 : struct spdk_bdev_open_async_opts opts;
8320 : uint64_t start_ticks;
8321 : struct spdk_thread *orig_thread;
8322 : struct spdk_poller *poller;
8323 : TAILQ_ENTRY(spdk_bdev_open_async_ctx) tailq;
8324 : };
8325 :
8326 : static void
8327 0 : bdev_open_async_done(void *arg)
8328 : {
8329 0 : struct spdk_bdev_open_async_ctx *ctx = arg;
8330 :
8331 0 : ctx->cb_fn(ctx->desc, ctx->rc, ctx->cb_arg);
8332 :
8333 0 : free(ctx->bdev_name);
8334 0 : free(ctx);
8335 0 : }
8336 :
8337 : static void
8338 0 : bdev_open_async_cancel(void *arg)
8339 : {
8340 0 : struct spdk_bdev_open_async_ctx *ctx = arg;
8341 :
8342 0 : assert(ctx->rc == -ESHUTDOWN);
8343 :
8344 0 : spdk_poller_unregister(&ctx->poller);
8345 :
8346 0 : bdev_open_async_done(ctx);
8347 0 : }
8348 :
8349 : /* This is called when the bdev library finishes at shutdown. */
8350 : static void
8351 68 : bdev_open_async_fini(void)
8352 : {
8353 : struct spdk_bdev_open_async_ctx *ctx, *tmp_ctx;
8354 :
8355 68 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8356 68 : TAILQ_FOREACH_SAFE(ctx, &g_bdev_mgr.async_bdev_opens, tailq, tmp_ctx) {
8357 0 : TAILQ_REMOVE(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8358 : /*
8359 : * We have to move to ctx->orig_thread to unregister ctx->poller.
8360 : * However, there is a chance that ctx->poller is executed before
8361 : * message is executed, which could result in bdev_open_async_done()
8362 : * being called twice. To avoid such race condition, set ctx->rc to
8363 : * -ESHUTDOWN.
8364 : */
8365 0 : ctx->rc = -ESHUTDOWN;
8366 0 : spdk_thread_send_msg(ctx->orig_thread, bdev_open_async_cancel, ctx);
8367 : }
8368 68 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8369 68 : }
8370 :
8371 : static int bdev_open_async(void *arg);
8372 :
8373 : static void
8374 0 : _bdev_open_async(struct spdk_bdev_open_async_ctx *ctx)
8375 : {
8376 : uint64_t timeout_ticks;
8377 :
8378 0 : if (ctx->rc == -ESHUTDOWN) {
8379 : /* This context is being canceled. Do nothing. */
8380 0 : return;
8381 : }
8382 :
8383 0 : ctx->rc = bdev_open_ext(ctx->bdev_name, ctx->write, ctx->event_cb, ctx->event_ctx,
8384 : &ctx->desc);
8385 0 : if (ctx->rc == 0 || ctx->opts.timeout_ms == 0) {
8386 0 : goto exit;
8387 : }
8388 :
8389 0 : timeout_ticks = ctx->start_ticks + ctx->opts.timeout_ms * spdk_get_ticks_hz() / 1000ull;
8390 0 : if (spdk_get_ticks() >= timeout_ticks) {
8391 0 : SPDK_ERRLOG("Timed out while waiting for bdev '%s' to appear\n", ctx->bdev_name);
8392 0 : ctx->rc = -ETIMEDOUT;
8393 0 : goto exit;
8394 : }
8395 :
8396 0 : return;
8397 :
8398 0 : exit:
8399 0 : spdk_poller_unregister(&ctx->poller);
8400 0 : TAILQ_REMOVE(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8401 :
8402 : /* Completion callback is processed after stack unwinding. */
8403 0 : spdk_thread_send_msg(ctx->orig_thread, bdev_open_async_done, ctx);
8404 : }
8405 :
8406 : static int
8407 0 : bdev_open_async(void *arg)
8408 : {
8409 0 : struct spdk_bdev_open_async_ctx *ctx = arg;
8410 :
8411 0 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8412 :
8413 0 : _bdev_open_async(ctx);
8414 :
8415 0 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8416 :
8417 0 : return SPDK_POLLER_BUSY;
8418 : }
8419 :
8420 : static void
8421 0 : bdev_open_async_opts_copy(struct spdk_bdev_open_async_opts *opts,
8422 : struct spdk_bdev_open_async_opts *opts_src,
8423 : size_t size)
8424 : {
8425 0 : assert(opts);
8426 0 : assert(opts_src);
8427 :
8428 0 : opts->size = size;
8429 :
8430 : #define SET_FIELD(field) \
8431 : if (offsetof(struct spdk_bdev_open_async_opts, field) + sizeof(opts->field) <= size) { \
8432 : opts->field = opts_src->field; \
8433 : } \
8434 :
8435 0 : SET_FIELD(timeout_ms);
8436 :
8437 : /* Do not remove this statement, you should always update this statement when you adding a new field,
8438 : * and do not forget to add the SET_FIELD statement for your added field. */
8439 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_open_async_opts) == 16, "Incorrect size");
8440 :
8441 : #undef SET_FIELD
8442 0 : }
8443 :
8444 : static void
8445 0 : bdev_open_async_opts_get_default(struct spdk_bdev_open_async_opts *opts, size_t size)
8446 : {
8447 0 : assert(opts);
8448 :
8449 0 : opts->size = size;
8450 :
8451 : #define SET_FIELD(field, value) \
8452 : if (offsetof(struct spdk_bdev_open_async_opts, field) + sizeof(opts->field) <= size) { \
8453 : opts->field = value; \
8454 : } \
8455 :
8456 0 : SET_FIELD(timeout_ms, 0);
8457 :
8458 : #undef SET_FIELD
8459 0 : }
8460 :
8461 : int
8462 0 : spdk_bdev_open_async(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
8463 : void *event_ctx, struct spdk_bdev_open_async_opts *opts,
8464 : spdk_bdev_open_async_cb_t open_cb, void *open_cb_arg)
8465 : {
8466 : struct spdk_bdev_open_async_ctx *ctx;
8467 :
8468 0 : if (event_cb == NULL) {
8469 0 : SPDK_ERRLOG("Missing event callback function\n");
8470 0 : return -EINVAL;
8471 : }
8472 :
8473 0 : if (open_cb == NULL) {
8474 0 : SPDK_ERRLOG("Missing open callback function\n");
8475 0 : return -EINVAL;
8476 : }
8477 :
8478 0 : if (opts != NULL && opts->size == 0) {
8479 0 : SPDK_ERRLOG("size in the options structure should not be zero\n");
8480 0 : return -EINVAL;
8481 : }
8482 :
8483 0 : ctx = calloc(1, sizeof(*ctx));
8484 0 : if (ctx == NULL) {
8485 0 : SPDK_ERRLOG("Failed to allocate open context\n");
8486 0 : return -ENOMEM;
8487 : }
8488 :
8489 0 : ctx->bdev_name = strdup(bdev_name);
8490 0 : if (ctx->bdev_name == NULL) {
8491 0 : SPDK_ERRLOG("Failed to duplicate bdev_name\n");
8492 0 : free(ctx);
8493 0 : return -ENOMEM;
8494 : }
8495 :
8496 0 : ctx->poller = SPDK_POLLER_REGISTER(bdev_open_async, ctx, 100 * 1000);
8497 0 : if (ctx->poller == NULL) {
8498 0 : SPDK_ERRLOG("Failed to register bdev_open_async poller\n");
8499 0 : free(ctx->bdev_name);
8500 0 : free(ctx);
8501 0 : return -ENOMEM;
8502 : }
8503 :
8504 0 : ctx->cb_fn = open_cb;
8505 0 : ctx->cb_arg = open_cb_arg;
8506 0 : ctx->write = write;
8507 0 : ctx->event_cb = event_cb;
8508 0 : ctx->event_ctx = event_ctx;
8509 0 : ctx->orig_thread = spdk_get_thread();
8510 0 : ctx->start_ticks = spdk_get_ticks();
8511 :
8512 0 : bdev_open_async_opts_get_default(&ctx->opts, sizeof(ctx->opts));
8513 0 : if (opts != NULL) {
8514 0 : bdev_open_async_opts_copy(&ctx->opts, opts, opts->size);
8515 : }
8516 :
8517 0 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8518 :
8519 0 : TAILQ_INSERT_TAIL(&g_bdev_mgr.async_bdev_opens, ctx, tailq);
8520 0 : _bdev_open_async(ctx);
8521 :
8522 0 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8523 :
8524 0 : return 0;
8525 : }
8526 :
8527 : static void
8528 265 : bdev_close(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc)
8529 : {
8530 : int rc;
8531 :
8532 265 : spdk_spin_lock(&bdev->internal.spinlock);
8533 265 : spdk_spin_lock(&desc->spinlock);
8534 :
8535 265 : TAILQ_REMOVE(&bdev->internal.open_descs, desc, link);
8536 :
8537 265 : desc->closed = true;
8538 :
8539 265 : if (desc->claim != NULL) {
8540 20 : bdev_desc_release_claims(desc);
8541 : }
8542 :
8543 265 : if (0 == desc->refs) {
8544 254 : spdk_spin_unlock(&desc->spinlock);
8545 254 : bdev_desc_free(desc);
8546 : } else {
8547 11 : spdk_spin_unlock(&desc->spinlock);
8548 : }
8549 :
8550 : /* If no more descriptors, kill QoS channel */
8551 265 : if (bdev->internal.qos && TAILQ_EMPTY(&bdev->internal.open_descs)) {
8552 7 : SPDK_DEBUGLOG(bdev, "Closed last descriptor for bdev %s on thread %p. Stopping QoS.\n",
8553 : bdev->name, spdk_get_thread());
8554 :
8555 7 : if (bdev_qos_destroy(bdev)) {
8556 : /* There isn't anything we can do to recover here. Just let the
8557 : * old QoS poller keep running. The QoS handling won't change
8558 : * cores when the user allocates a new channel, but it won't break. */
8559 0 : SPDK_ERRLOG("Unable to shut down QoS poller. It will continue running on the current thread.\n");
8560 : }
8561 : }
8562 :
8563 265 : if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
8564 15 : rc = bdev_unregister_unsafe(bdev);
8565 15 : spdk_spin_unlock(&bdev->internal.spinlock);
8566 :
8567 15 : if (rc == 0) {
8568 15 : spdk_io_device_unregister(__bdev_to_io_dev(bdev), bdev_destroy_cb);
8569 : }
8570 : } else {
8571 250 : spdk_spin_unlock(&bdev->internal.spinlock);
8572 : }
8573 265 : }
8574 :
8575 : void
8576 126 : spdk_bdev_close(struct spdk_bdev_desc *desc)
8577 : {
8578 126 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
8579 :
8580 126 : SPDK_DEBUGLOG(bdev, "Closing descriptor %p for bdev %s on thread %p\n", desc, bdev->name,
8581 : spdk_get_thread());
8582 :
8583 126 : assert(desc->thread == spdk_get_thread());
8584 :
8585 126 : spdk_poller_unregister(&desc->io_timeout_poller);
8586 :
8587 126 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8588 :
8589 126 : bdev_close(bdev, desc);
8590 :
8591 126 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8592 126 : }
8593 :
8594 : int32_t
8595 3 : spdk_bdev_get_numa_id(struct spdk_bdev *bdev)
8596 : {
8597 3 : if (bdev->numa.id_valid) {
8598 2 : return bdev->numa.id;
8599 : } else {
8600 1 : return SPDK_ENV_NUMA_ID_ANY;
8601 : }
8602 : }
8603 :
8604 : static void
8605 128 : bdev_register_finished(void *arg)
8606 : {
8607 128 : struct spdk_bdev_desc *desc = arg;
8608 128 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
8609 :
8610 128 : spdk_notify_send("bdev_register", spdk_bdev_get_name(bdev));
8611 :
8612 128 : spdk_spin_lock(&g_bdev_mgr.spinlock);
8613 :
8614 128 : bdev_close(bdev, desc);
8615 :
8616 128 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
8617 128 : }
8618 :
8619 : int
8620 131 : spdk_bdev_register(struct spdk_bdev *bdev)
8621 : {
8622 131 : struct spdk_bdev_desc *desc;
8623 131 : struct spdk_thread *thread = spdk_get_thread();
8624 : int rc;
8625 :
8626 131 : if (spdk_unlikely(!spdk_thread_is_app_thread(NULL))) {
8627 1 : SPDK_ERRLOG("Cannot register bdev %s on thread %p (%s)\n", bdev->name, thread,
8628 : thread ? spdk_thread_get_name(thread) : "null");
8629 1 : return -EINVAL;
8630 : }
8631 :
8632 130 : rc = bdev_register(bdev);
8633 130 : if (rc != 0) {
8634 2 : return rc;
8635 : }
8636 :
8637 : /* A descriptor is opened to prevent bdev deletion during examination */
8638 128 : rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
8639 128 : if (rc != 0) {
8640 0 : spdk_bdev_unregister(bdev, NULL, NULL);
8641 0 : return rc;
8642 : }
8643 :
8644 128 : rc = bdev_open(bdev, false, desc);
8645 128 : if (rc != 0) {
8646 0 : bdev_desc_free(desc);
8647 0 : spdk_bdev_unregister(bdev, NULL, NULL);
8648 0 : return rc;
8649 : }
8650 :
8651 : /* Examine configuration before initializing I/O */
8652 128 : bdev_examine(bdev);
8653 :
8654 128 : rc = spdk_bdev_wait_for_examine(bdev_register_finished, desc);
8655 128 : if (rc != 0) {
8656 0 : bdev_close(bdev, desc);
8657 0 : spdk_bdev_unregister(bdev, NULL, NULL);
8658 : }
8659 :
8660 128 : return rc;
8661 : }
8662 :
8663 : int
8664 26 : spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
8665 : struct spdk_bdev_module *module)
8666 : {
8667 26 : spdk_spin_lock(&bdev->internal.spinlock);
8668 :
8669 26 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8670 6 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8671 6 : spdk_spin_unlock(&bdev->internal.spinlock);
8672 6 : return -EPERM;
8673 : }
8674 :
8675 20 : if (desc && !desc->write) {
8676 5 : desc->write = true;
8677 : }
8678 :
8679 20 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
8680 20 : bdev->internal.claim.v1.module = module;
8681 :
8682 20 : spdk_spin_unlock(&bdev->internal.spinlock);
8683 20 : return 0;
8684 : }
8685 :
8686 : void
8687 8 : spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
8688 : {
8689 8 : spdk_spin_lock(&bdev->internal.spinlock);
8690 :
8691 8 : assert(bdev->internal.claim.v1.module != NULL);
8692 8 : assert(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
8693 8 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
8694 8 : bdev->internal.claim.v1.module = NULL;
8695 :
8696 8 : spdk_spin_unlock(&bdev->internal.spinlock);
8697 8 : }
8698 :
8699 : /*
8700 : * Start claims v2
8701 : */
8702 :
8703 : const char *
8704 25 : spdk_bdev_claim_get_name(enum spdk_bdev_claim_type type)
8705 : {
8706 25 : switch (type) {
8707 0 : case SPDK_BDEV_CLAIM_NONE:
8708 0 : return "not_claimed";
8709 6 : case SPDK_BDEV_CLAIM_EXCL_WRITE:
8710 6 : return "exclusive_write";
8711 8 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8712 8 : return "read_many_write_one";
8713 5 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
8714 5 : return "read_many_write_none";
8715 6 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8716 6 : return "read_many_write_many";
8717 0 : default:
8718 0 : break;
8719 : }
8720 0 : return "invalid_claim";
8721 : }
8722 :
8723 : static bool
8724 115 : claim_type_is_v2(enum spdk_bdev_claim_type type)
8725 : {
8726 115 : switch (type) {
8727 115 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8728 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
8729 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8730 115 : return true;
8731 0 : default:
8732 0 : break;
8733 : }
8734 0 : return false;
8735 : }
8736 :
8737 : /* Returns true if taking a claim with desc->write == false should make the descriptor writable. */
8738 : static bool
8739 17 : claim_type_promotes_to_write(enum spdk_bdev_claim_type type)
8740 : {
8741 17 : switch (type) {
8742 6 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
8743 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8744 6 : return true;
8745 11 : default:
8746 11 : break;
8747 : }
8748 11 : return false;
8749 : }
8750 :
8751 : void
8752 57 : spdk_bdev_claim_opts_init(struct spdk_bdev_claim_opts *opts, size_t size)
8753 : {
8754 57 : if (opts == NULL) {
8755 0 : SPDK_ERRLOG("opts should not be NULL\n");
8756 0 : assert(opts != NULL);
8757 0 : return;
8758 : }
8759 57 : if (size == 0) {
8760 0 : SPDK_ERRLOG("size should not be zero\n");
8761 0 : assert(size != 0);
8762 0 : return;
8763 : }
8764 :
8765 57 : memset(opts, 0, size);
8766 57 : opts->opts_size = size;
8767 :
8768 : #define FIELD_OK(field) \
8769 : offsetof(struct spdk_bdev_claim_opts, field) + sizeof(opts->field) <= size
8770 :
8771 : #define SET_FIELD(field, value) \
8772 : if (FIELD_OK(field)) { \
8773 : opts->field = value; \
8774 : } \
8775 :
8776 57 : SET_FIELD(shared_claim_key, 0);
8777 :
8778 : #undef FIELD_OK
8779 : #undef SET_FIELD
8780 : }
8781 :
8782 : static int
8783 22 : claim_opts_copy(struct spdk_bdev_claim_opts *src, struct spdk_bdev_claim_opts *dst)
8784 : {
8785 22 : if (src->opts_size == 0) {
8786 0 : SPDK_ERRLOG("size should not be zero\n");
8787 0 : return -1;
8788 : }
8789 :
8790 22 : memset(dst, 0, sizeof(*dst));
8791 22 : dst->opts_size = src->opts_size;
8792 :
8793 : #define FIELD_OK(field) \
8794 : offsetof(struct spdk_bdev_claim_opts, field) + sizeof(src->field) <= src->opts_size
8795 :
8796 : #define SET_FIELD(field) \
8797 : if (FIELD_OK(field)) { \
8798 : dst->field = src->field; \
8799 : } \
8800 :
8801 22 : if (FIELD_OK(name)) {
8802 22 : snprintf(dst->name, sizeof(dst->name), "%s", src->name);
8803 : }
8804 :
8805 22 : SET_FIELD(shared_claim_key);
8806 :
8807 : /* You should not remove this statement, but need to update the assert statement
8808 : * if you add a new field, and also add a corresponding SET_FIELD statement */
8809 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_claim_opts) == 48, "Incorrect size");
8810 :
8811 : #undef FIELD_OK
8812 : #undef SET_FIELD
8813 22 : return 0;
8814 : }
8815 :
8816 : /* Returns 0 if a read-write-once claim can be taken. */
8817 : static int
8818 10 : claim_verify_rwo(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8819 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8820 : {
8821 10 : struct spdk_bdev *bdev = desc->bdev;
8822 : struct spdk_bdev_desc *open_desc;
8823 :
8824 10 : assert(spdk_spin_held(&bdev->internal.spinlock));
8825 10 : assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
8826 :
8827 10 : if (opts->shared_claim_key != 0) {
8828 1 : SPDK_ERRLOG("%s: key option not supported with read-write-once claims\n",
8829 : bdev->name);
8830 1 : return -EINVAL;
8831 : }
8832 9 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
8833 1 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8834 1 : return -EPERM;
8835 : }
8836 8 : if (desc->claim != NULL) {
8837 0 : SPDK_NOTICELOG("%s: descriptor already claimed bdev with module %s\n",
8838 : bdev->name, desc->claim->module->name);
8839 0 : return -EPERM;
8840 : }
8841 16 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
8842 10 : if (desc != open_desc && open_desc->write) {
8843 2 : SPDK_NOTICELOG("%s: Cannot obtain read-write-once claim while "
8844 : "another descriptor is open for writing\n",
8845 : bdev->name);
8846 2 : return -EPERM;
8847 : }
8848 : }
8849 :
8850 6 : return 0;
8851 : }
8852 :
8853 : /* Returns 0 if a read-only-many claim can be taken. */
8854 : static int
8855 15 : claim_verify_rom(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8856 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8857 : {
8858 15 : struct spdk_bdev *bdev = desc->bdev;
8859 : struct spdk_bdev_desc *open_desc;
8860 :
8861 15 : assert(spdk_spin_held(&bdev->internal.spinlock));
8862 15 : assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
8863 15 : assert(desc->claim == NULL);
8864 :
8865 15 : if (desc->write) {
8866 3 : SPDK_ERRLOG("%s: Cannot obtain read-only-many claim with writable descriptor\n",
8867 : bdev->name);
8868 3 : return -EINVAL;
8869 : }
8870 12 : if (opts->shared_claim_key != 0) {
8871 1 : SPDK_ERRLOG("%s: key option not supported with read-only-may claims\n", bdev->name);
8872 1 : return -EINVAL;
8873 : }
8874 11 : if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
8875 19 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
8876 11 : if (open_desc->write) {
8877 0 : SPDK_NOTICELOG("%s: Cannot obtain read-only-many claim while "
8878 : "another descriptor is open for writing\n",
8879 : bdev->name);
8880 0 : return -EPERM;
8881 : }
8882 : }
8883 : }
8884 :
8885 11 : return 0;
8886 : }
8887 :
8888 : /* Returns 0 if a read-write-many claim can be taken. */
8889 : static int
8890 8 : claim_verify_rwm(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8891 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8892 : {
8893 8 : struct spdk_bdev *bdev = desc->bdev;
8894 : struct spdk_bdev_desc *open_desc;
8895 :
8896 8 : assert(spdk_spin_held(&bdev->internal.spinlock));
8897 8 : assert(type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
8898 8 : assert(desc->claim == NULL);
8899 :
8900 8 : if (opts->shared_claim_key == 0) {
8901 2 : SPDK_ERRLOG("%s: shared_claim_key option required with read-write-may claims\n",
8902 : bdev->name);
8903 2 : return -EINVAL;
8904 : }
8905 6 : switch (bdev->internal.claim_type) {
8906 4 : case SPDK_BDEV_CLAIM_NONE:
8907 7 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
8908 5 : if (open_desc == desc) {
8909 3 : continue;
8910 : }
8911 2 : if (open_desc->write) {
8912 2 : SPDK_NOTICELOG("%s: Cannot obtain read-write-many claim while "
8913 : "another descriptor is open for writing without a "
8914 : "claim\n", bdev->name);
8915 2 : return -EPERM;
8916 : }
8917 : }
8918 2 : break;
8919 2 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
8920 2 : if (opts->shared_claim_key != bdev->internal.claim.v2.key) {
8921 1 : LOG_ALREADY_CLAIMED_ERROR("already claimed with another key", bdev);
8922 1 : return -EPERM;
8923 : }
8924 1 : break;
8925 0 : default:
8926 0 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8927 0 : return -EBUSY;
8928 : }
8929 :
8930 3 : return 0;
8931 : }
8932 :
8933 : /* Updates desc and its bdev with a v2 claim. */
8934 : static int
8935 20 : claim_bdev(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8936 : struct spdk_bdev_claim_opts *opts, struct spdk_bdev_module *module)
8937 : {
8938 20 : struct spdk_bdev *bdev = desc->bdev;
8939 : struct spdk_bdev_module_claim *claim;
8940 :
8941 20 : assert(spdk_spin_held(&bdev->internal.spinlock));
8942 20 : assert(claim_type_is_v2(type));
8943 20 : assert(desc->claim == NULL);
8944 :
8945 20 : claim = calloc(1, sizeof(*desc->claim));
8946 20 : if (claim == NULL) {
8947 0 : SPDK_ERRLOG("%s: out of memory while allocating claim\n", bdev->name);
8948 0 : return -ENOMEM;
8949 : }
8950 20 : claim->module = module;
8951 20 : claim->desc = desc;
8952 : SPDK_STATIC_ASSERT(sizeof(claim->name) == sizeof(opts->name), "sizes must match");
8953 20 : memcpy(claim->name, opts->name, sizeof(claim->name));
8954 20 : desc->claim = claim;
8955 :
8956 20 : if (bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE) {
8957 16 : bdev->internal.claim_type = type;
8958 16 : TAILQ_INIT(&bdev->internal.claim.v2.claims);
8959 16 : bdev->internal.claim.v2.key = opts->shared_claim_key;
8960 : }
8961 20 : assert(type == bdev->internal.claim_type);
8962 :
8963 20 : TAILQ_INSERT_TAIL(&bdev->internal.claim.v2.claims, claim, link);
8964 :
8965 20 : if (!desc->write && claim_type_promotes_to_write(type)) {
8966 6 : desc->write = true;
8967 : }
8968 :
8969 20 : return 0;
8970 : }
8971 :
8972 : int
8973 44 : spdk_bdev_module_claim_bdev_desc(struct spdk_bdev_desc *desc, enum spdk_bdev_claim_type type,
8974 : struct spdk_bdev_claim_opts *_opts,
8975 : struct spdk_bdev_module *module)
8976 : {
8977 : struct spdk_bdev *bdev;
8978 44 : struct spdk_bdev_claim_opts opts;
8979 44 : int rc = 0;
8980 :
8981 44 : if (desc == NULL) {
8982 0 : SPDK_ERRLOG("descriptor must not be NULL\n");
8983 0 : return -EINVAL;
8984 : }
8985 :
8986 44 : bdev = desc->bdev;
8987 :
8988 44 : if (_opts == NULL) {
8989 22 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
8990 22 : } else if (claim_opts_copy(_opts, &opts) != 0) {
8991 0 : return -EINVAL;
8992 : }
8993 :
8994 44 : spdk_spin_lock(&bdev->internal.spinlock);
8995 :
8996 44 : if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE &&
8997 17 : bdev->internal.claim_type != type) {
8998 11 : LOG_ALREADY_CLAIMED_ERROR("already claimed", bdev);
8999 11 : spdk_spin_unlock(&bdev->internal.spinlock);
9000 11 : return -EPERM;
9001 : }
9002 :
9003 33 : if (claim_type_is_v2(type) && desc->claim != NULL) {
9004 0 : SPDK_ERRLOG("%s: descriptor already has %s claim with name '%s'\n",
9005 : bdev->name, spdk_bdev_claim_get_name(type), desc->claim->name);
9006 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9007 0 : return -EPERM;
9008 : }
9009 :
9010 33 : switch (type) {
9011 0 : case SPDK_BDEV_CLAIM_EXCL_WRITE:
9012 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9013 0 : return spdk_bdev_module_claim_bdev(bdev, desc, module);
9014 10 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE:
9015 10 : rc = claim_verify_rwo(desc, type, &opts, module);
9016 10 : break;
9017 15 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE:
9018 15 : rc = claim_verify_rom(desc, type, &opts, module);
9019 15 : break;
9020 8 : case SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED:
9021 8 : rc = claim_verify_rwm(desc, type, &opts, module);
9022 8 : break;
9023 0 : default:
9024 0 : SPDK_ERRLOG("%s: claim type %d not supported\n", bdev->name, type);
9025 0 : rc = -ENOTSUP;
9026 : }
9027 :
9028 33 : if (rc == 0) {
9029 20 : rc = claim_bdev(desc, type, &opts, module);
9030 : }
9031 :
9032 33 : spdk_spin_unlock(&bdev->internal.spinlock);
9033 33 : return rc;
9034 : }
9035 :
9036 : static void
9037 16 : claim_reset(struct spdk_bdev *bdev)
9038 : {
9039 16 : assert(spdk_spin_held(&bdev->internal.spinlock));
9040 16 : assert(claim_type_is_v2(bdev->internal.claim_type));
9041 16 : assert(TAILQ_EMPTY(&bdev->internal.claim.v2.claims));
9042 :
9043 16 : memset(&bdev->internal.claim, 0, sizeof(bdev->internal.claim));
9044 16 : bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
9045 16 : }
9046 :
9047 : static void
9048 20 : bdev_desc_release_claims(struct spdk_bdev_desc *desc)
9049 : {
9050 20 : struct spdk_bdev *bdev = desc->bdev;
9051 :
9052 20 : assert(spdk_spin_held(&bdev->internal.spinlock));
9053 20 : assert(claim_type_is_v2(bdev->internal.claim_type));
9054 :
9055 20 : if (bdev->internal.examine_in_progress == 0) {
9056 20 : TAILQ_REMOVE(&bdev->internal.claim.v2.claims, desc->claim, link);
9057 20 : free(desc->claim);
9058 20 : if (TAILQ_EMPTY(&bdev->internal.claim.v2.claims)) {
9059 16 : claim_reset(bdev);
9060 : }
9061 : } else {
9062 : /* This is a dead claim that will be cleaned up when bdev_examine() is done. */
9063 0 : desc->claim->module = NULL;
9064 0 : desc->claim->desc = NULL;
9065 : }
9066 20 : desc->claim = NULL;
9067 20 : }
9068 :
9069 : /*
9070 : * End claims v2
9071 : */
9072 :
9073 : struct spdk_bdev *
9074 1197 : spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
9075 : {
9076 1197 : assert(desc != NULL);
9077 1197 : return desc->bdev;
9078 : }
9079 :
9080 : int
9081 1 : spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn)
9082 : {
9083 : struct spdk_bdev *bdev, *tmp;
9084 1 : struct spdk_bdev_desc *desc;
9085 1 : int rc = 0;
9086 :
9087 1 : assert(fn != NULL);
9088 :
9089 1 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9090 1 : bdev = spdk_bdev_first();
9091 9 : while (bdev != NULL) {
9092 8 : rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
9093 8 : if (rc != 0) {
9094 0 : break;
9095 : }
9096 8 : rc = bdev_open(bdev, false, desc);
9097 8 : if (rc != 0) {
9098 1 : bdev_desc_free(desc);
9099 1 : if (rc == -ENODEV) {
9100 : /* Ignore the error and move to the next bdev. */
9101 1 : rc = 0;
9102 1 : bdev = spdk_bdev_next(bdev);
9103 1 : continue;
9104 : }
9105 0 : break;
9106 : }
9107 7 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9108 :
9109 7 : rc = fn(ctx, bdev);
9110 :
9111 7 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9112 7 : tmp = spdk_bdev_next(bdev);
9113 7 : bdev_close(bdev, desc);
9114 7 : if (rc != 0) {
9115 0 : break;
9116 : }
9117 7 : bdev = tmp;
9118 : }
9119 1 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9120 :
9121 1 : return rc;
9122 : }
9123 :
9124 : int
9125 1 : spdk_for_each_bdev_leaf(void *ctx, spdk_for_each_bdev_fn fn)
9126 : {
9127 : struct spdk_bdev *bdev, *tmp;
9128 1 : struct spdk_bdev_desc *desc;
9129 1 : int rc = 0;
9130 :
9131 1 : assert(fn != NULL);
9132 :
9133 1 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9134 1 : bdev = spdk_bdev_first_leaf();
9135 6 : while (bdev != NULL) {
9136 5 : rc = bdev_desc_alloc(bdev, _tmp_bdev_event_cb, NULL, &desc);
9137 5 : if (rc != 0) {
9138 0 : break;
9139 : }
9140 5 : rc = bdev_open(bdev, false, desc);
9141 5 : if (rc != 0) {
9142 1 : bdev_desc_free(desc);
9143 1 : if (rc == -ENODEV) {
9144 : /* Ignore the error and move to the next bdev. */
9145 1 : rc = 0;
9146 1 : bdev = spdk_bdev_next_leaf(bdev);
9147 1 : continue;
9148 : }
9149 0 : break;
9150 : }
9151 4 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9152 :
9153 4 : rc = fn(ctx, bdev);
9154 :
9155 4 : spdk_spin_lock(&g_bdev_mgr.spinlock);
9156 4 : tmp = spdk_bdev_next_leaf(bdev);
9157 4 : bdev_close(bdev, desc);
9158 4 : if (rc != 0) {
9159 0 : break;
9160 : }
9161 4 : bdev = tmp;
9162 : }
9163 1 : spdk_spin_unlock(&g_bdev_mgr.spinlock);
9164 :
9165 1 : return rc;
9166 : }
9167 :
9168 : void
9169 0 : spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
9170 : {
9171 : struct iovec *iovs;
9172 : int iovcnt;
9173 :
9174 0 : if (bdev_io == NULL) {
9175 0 : return;
9176 : }
9177 :
9178 0 : switch (bdev_io->type) {
9179 0 : case SPDK_BDEV_IO_TYPE_READ:
9180 : case SPDK_BDEV_IO_TYPE_WRITE:
9181 : case SPDK_BDEV_IO_TYPE_ZCOPY:
9182 0 : iovs = bdev_io->u.bdev.iovs;
9183 0 : iovcnt = bdev_io->u.bdev.iovcnt;
9184 0 : break;
9185 0 : default:
9186 0 : iovs = NULL;
9187 0 : iovcnt = 0;
9188 0 : break;
9189 : }
9190 :
9191 0 : if (iovp) {
9192 0 : *iovp = iovs;
9193 : }
9194 0 : if (iovcntp) {
9195 0 : *iovcntp = iovcnt;
9196 : }
9197 : }
9198 :
9199 : void *
9200 0 : spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
9201 : {
9202 0 : if (bdev_io == NULL) {
9203 0 : return NULL;
9204 : }
9205 :
9206 0 : if (!spdk_bdev_is_md_separate(bdev_io->bdev)) {
9207 0 : return NULL;
9208 : }
9209 :
9210 0 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ ||
9211 0 : bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
9212 0 : return bdev_io->u.bdev.md_buf;
9213 : }
9214 :
9215 0 : return NULL;
9216 : }
9217 :
9218 : void *
9219 0 : spdk_bdev_io_get_cb_arg(struct spdk_bdev_io *bdev_io)
9220 : {
9221 0 : if (bdev_io == NULL) {
9222 0 : assert(false);
9223 : return NULL;
9224 : }
9225 :
9226 0 : return bdev_io->internal.caller_ctx;
9227 : }
9228 :
9229 : void
9230 7 : spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
9231 : {
9232 :
9233 7 : if (spdk_bdev_module_list_find(bdev_module->name)) {
9234 0 : SPDK_ERRLOG("ERROR: module '%s' already registered.\n", bdev_module->name);
9235 0 : assert(false);
9236 : }
9237 :
9238 7 : spdk_spin_init(&bdev_module->internal.spinlock);
9239 7 : TAILQ_INIT(&bdev_module->internal.quiesced_ranges);
9240 :
9241 : /*
9242 : * Modules with examine callbacks must be initialized first, so they are
9243 : * ready to handle examine callbacks from later modules that will
9244 : * register physical bdevs.
9245 : */
9246 7 : if (bdev_module->examine_config != NULL || bdev_module->examine_disk != NULL) {
9247 4 : TAILQ_INSERT_HEAD(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
9248 : } else {
9249 3 : TAILQ_INSERT_TAIL(&g_bdev_mgr.bdev_modules, bdev_module, internal.tailq);
9250 : }
9251 7 : }
9252 :
9253 : struct spdk_bdev_module *
9254 7 : spdk_bdev_module_list_find(const char *name)
9255 : {
9256 : struct spdk_bdev_module *bdev_module;
9257 :
9258 14 : TAILQ_FOREACH(bdev_module, &g_bdev_mgr.bdev_modules, internal.tailq) {
9259 7 : if (strcmp(name, bdev_module->name) == 0) {
9260 0 : break;
9261 : }
9262 : }
9263 :
9264 7 : return bdev_module;
9265 : }
9266 :
9267 : static int
9268 6 : bdev_write_zero_buffer(struct spdk_bdev_io *bdev_io)
9269 : {
9270 : uint64_t num_blocks;
9271 6 : void *md_buf = NULL;
9272 :
9273 6 : num_blocks = bdev_io->u.bdev.num_blocks;
9274 :
9275 6 : if (spdk_bdev_is_md_separate(bdev_io->bdev)) {
9276 4 : md_buf = (char *)g_bdev_mgr.zero_buffer +
9277 2 : spdk_bdev_get_block_size(bdev_io->bdev) * num_blocks;
9278 : }
9279 :
9280 6 : return bdev_write_blocks_with_md(bdev_io->internal.desc,
9281 6 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
9282 : g_bdev_mgr.zero_buffer, md_buf,
9283 : bdev_io->u.bdev.offset_blocks, num_blocks,
9284 : bdev_write_zero_buffer_done, bdev_io);
9285 : }
9286 :
9287 : static void
9288 6 : bdev_write_zero_buffer_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
9289 : {
9290 6 : struct spdk_bdev_io *parent_io = cb_arg;
9291 :
9292 6 : spdk_bdev_free_io(bdev_io);
9293 :
9294 6 : parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
9295 6 : parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
9296 6 : }
9297 :
9298 : static void
9299 10 : bdev_set_qos_limit_done(struct set_qos_limit_ctx *ctx, int status)
9300 : {
9301 10 : spdk_spin_lock(&ctx->bdev->internal.spinlock);
9302 10 : ctx->bdev->internal.qos_mod_in_progress = false;
9303 10 : spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9304 :
9305 10 : if (ctx->cb_fn) {
9306 8 : ctx->cb_fn(ctx->cb_arg, status);
9307 : }
9308 10 : free(ctx);
9309 10 : }
9310 :
9311 : static void
9312 2 : bdev_disable_qos_done(void *cb_arg)
9313 : {
9314 2 : struct set_qos_limit_ctx *ctx = cb_arg;
9315 2 : struct spdk_bdev *bdev = ctx->bdev;
9316 : struct spdk_bdev_qos *qos;
9317 :
9318 2 : spdk_spin_lock(&bdev->internal.spinlock);
9319 2 : qos = bdev->internal.qos;
9320 2 : bdev->internal.qos = NULL;
9321 2 : spdk_spin_unlock(&bdev->internal.spinlock);
9322 :
9323 2 : if (qos->thread != NULL) {
9324 2 : spdk_put_io_channel(spdk_io_channel_from_ctx(qos->ch));
9325 2 : spdk_poller_unregister(&qos->poller);
9326 : }
9327 :
9328 2 : free(qos);
9329 :
9330 2 : bdev_set_qos_limit_done(ctx, 0);
9331 2 : }
9332 :
9333 : static void
9334 2 : bdev_disable_qos_msg_done(struct spdk_bdev *bdev, void *_ctx, int status)
9335 : {
9336 2 : struct set_qos_limit_ctx *ctx = _ctx;
9337 : struct spdk_thread *thread;
9338 :
9339 2 : spdk_spin_lock(&bdev->internal.spinlock);
9340 2 : thread = bdev->internal.qos->thread;
9341 2 : spdk_spin_unlock(&bdev->internal.spinlock);
9342 :
9343 2 : if (thread != NULL) {
9344 2 : spdk_thread_send_msg(thread, bdev_disable_qos_done, ctx);
9345 : } else {
9346 0 : bdev_disable_qos_done(ctx);
9347 : }
9348 2 : }
9349 :
9350 : static void
9351 4 : bdev_disable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9352 : struct spdk_io_channel *ch, void *_ctx)
9353 : {
9354 4 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9355 : struct spdk_bdev_io *bdev_io;
9356 :
9357 4 : bdev_ch->flags &= ~BDEV_CH_QOS_ENABLED;
9358 :
9359 6 : while (!TAILQ_EMPTY(&bdev_ch->qos_queued_io)) {
9360 : /* Re-submit the queued I/O. */
9361 2 : bdev_io = TAILQ_FIRST(&bdev_ch->qos_queued_io);
9362 2 : TAILQ_REMOVE(&bdev_ch->qos_queued_io, bdev_io, internal.link);
9363 2 : _bdev_io_submit(bdev_io);
9364 : }
9365 :
9366 4 : spdk_bdev_for_each_channel_continue(i, 0);
9367 4 : }
9368 :
9369 : static void
9370 1 : bdev_update_qos_rate_limit_msg(void *cb_arg)
9371 : {
9372 1 : struct set_qos_limit_ctx *ctx = cb_arg;
9373 1 : struct spdk_bdev *bdev = ctx->bdev;
9374 :
9375 1 : spdk_spin_lock(&bdev->internal.spinlock);
9376 1 : bdev_qos_update_max_quota_per_timeslice(bdev->internal.qos);
9377 1 : spdk_spin_unlock(&bdev->internal.spinlock);
9378 :
9379 1 : bdev_set_qos_limit_done(ctx, 0);
9380 1 : }
9381 :
9382 : static void
9383 9 : bdev_enable_qos_msg(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9384 : struct spdk_io_channel *ch, void *_ctx)
9385 : {
9386 9 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9387 :
9388 9 : spdk_spin_lock(&bdev->internal.spinlock);
9389 9 : bdev_enable_qos(bdev, bdev_ch);
9390 9 : spdk_spin_unlock(&bdev->internal.spinlock);
9391 9 : spdk_bdev_for_each_channel_continue(i, 0);
9392 9 : }
9393 :
9394 : static void
9395 6 : bdev_enable_qos_done(struct spdk_bdev *bdev, void *_ctx, int status)
9396 : {
9397 6 : struct set_qos_limit_ctx *ctx = _ctx;
9398 :
9399 6 : bdev_set_qos_limit_done(ctx, status);
9400 6 : }
9401 :
9402 : static void
9403 7 : bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits)
9404 : {
9405 : int i;
9406 :
9407 7 : assert(bdev->internal.qos != NULL);
9408 :
9409 35 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9410 28 : if (limits[i] != SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
9411 28 : bdev->internal.qos->rate_limits[i].limit = limits[i];
9412 :
9413 28 : if (limits[i] == 0) {
9414 19 : bdev->internal.qos->rate_limits[i].limit =
9415 : SPDK_BDEV_QOS_LIMIT_NOT_DEFINED;
9416 : }
9417 : }
9418 : }
9419 7 : }
9420 :
9421 : void
9422 9 : spdk_bdev_set_qos_rate_limits(struct spdk_bdev *bdev, uint64_t *limits,
9423 : void (*cb_fn)(void *cb_arg, int status), void *cb_arg)
9424 : {
9425 : struct set_qos_limit_ctx *ctx;
9426 : uint32_t limit_set_complement;
9427 : uint64_t min_limit_per_sec;
9428 : int i;
9429 9 : bool disable_rate_limit = true;
9430 :
9431 45 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9432 36 : if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED) {
9433 0 : continue;
9434 : }
9435 :
9436 36 : if (limits[i] > 0) {
9437 10 : disable_rate_limit = false;
9438 : }
9439 :
9440 36 : if (bdev_qos_is_iops_rate_limit(i) == true) {
9441 9 : min_limit_per_sec = SPDK_BDEV_QOS_MIN_IOS_PER_SEC;
9442 : } else {
9443 27 : if (limits[i] > SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC) {
9444 0 : SPDK_WARNLOG("Requested rate limit %" PRIu64 " will result in uint64_t overflow, "
9445 : "reset to %" PRIu64 "\n", limits[i], SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC);
9446 0 : limits[i] = SPDK_BDEV_QOS_MAX_MBYTES_PER_SEC;
9447 : }
9448 : /* Change from megabyte to byte rate limit */
9449 27 : limits[i] = limits[i] * 1024 * 1024;
9450 27 : min_limit_per_sec = SPDK_BDEV_QOS_MIN_BYTES_PER_SEC;
9451 : }
9452 :
9453 36 : limit_set_complement = limits[i] % min_limit_per_sec;
9454 36 : if (limit_set_complement) {
9455 0 : SPDK_ERRLOG("Requested rate limit %" PRIu64 " is not a multiple of %" PRIu64 "\n",
9456 : limits[i], min_limit_per_sec);
9457 0 : limits[i] += min_limit_per_sec - limit_set_complement;
9458 0 : SPDK_ERRLOG("Round up the rate limit to %" PRIu64 "\n", limits[i]);
9459 : }
9460 : }
9461 :
9462 9 : ctx = calloc(1, sizeof(*ctx));
9463 9 : if (ctx == NULL) {
9464 0 : cb_fn(cb_arg, -ENOMEM);
9465 0 : return;
9466 : }
9467 :
9468 9 : ctx->cb_fn = cb_fn;
9469 9 : ctx->cb_arg = cb_arg;
9470 9 : ctx->bdev = bdev;
9471 :
9472 9 : spdk_spin_lock(&bdev->internal.spinlock);
9473 9 : if (bdev->internal.qos_mod_in_progress) {
9474 1 : spdk_spin_unlock(&bdev->internal.spinlock);
9475 1 : free(ctx);
9476 1 : cb_fn(cb_arg, -EAGAIN);
9477 1 : return;
9478 : }
9479 8 : bdev->internal.qos_mod_in_progress = true;
9480 :
9481 8 : if (disable_rate_limit == true && bdev->internal.qos) {
9482 10 : for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
9483 8 : if (limits[i] == SPDK_BDEV_QOS_LIMIT_NOT_DEFINED &&
9484 0 : (bdev->internal.qos->rate_limits[i].limit > 0 &&
9485 0 : bdev->internal.qos->rate_limits[i].limit !=
9486 : SPDK_BDEV_QOS_LIMIT_NOT_DEFINED)) {
9487 0 : disable_rate_limit = false;
9488 0 : break;
9489 : }
9490 : }
9491 : }
9492 :
9493 8 : if (disable_rate_limit == false) {
9494 5 : if (bdev->internal.qos == NULL) {
9495 4 : bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
9496 4 : if (!bdev->internal.qos) {
9497 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9498 0 : SPDK_ERRLOG("Unable to allocate memory for QoS tracking\n");
9499 0 : bdev_set_qos_limit_done(ctx, -ENOMEM);
9500 0 : return;
9501 : }
9502 : }
9503 :
9504 5 : if (bdev->internal.qos->thread == NULL) {
9505 : /* Enabling */
9506 4 : bdev_set_qos_rate_limits(bdev, limits);
9507 :
9508 4 : spdk_bdev_for_each_channel(bdev, bdev_enable_qos_msg, ctx,
9509 : bdev_enable_qos_done);
9510 : } else {
9511 : /* Updating */
9512 1 : bdev_set_qos_rate_limits(bdev, limits);
9513 :
9514 1 : spdk_thread_send_msg(bdev->internal.qos->thread,
9515 : bdev_update_qos_rate_limit_msg, ctx);
9516 : }
9517 : } else {
9518 3 : if (bdev->internal.qos != NULL) {
9519 2 : bdev_set_qos_rate_limits(bdev, limits);
9520 :
9521 : /* Disabling */
9522 2 : spdk_bdev_for_each_channel(bdev, bdev_disable_qos_msg, ctx,
9523 : bdev_disable_qos_msg_done);
9524 : } else {
9525 1 : spdk_spin_unlock(&bdev->internal.spinlock);
9526 1 : bdev_set_qos_limit_done(ctx, 0);
9527 1 : return;
9528 : }
9529 : }
9530 :
9531 7 : spdk_spin_unlock(&bdev->internal.spinlock);
9532 : }
9533 :
9534 : struct spdk_bdev_histogram_ctx {
9535 : spdk_bdev_histogram_status_cb cb_fn;
9536 : void *cb_arg;
9537 : struct spdk_bdev *bdev;
9538 : int status;
9539 : };
9540 :
9541 : static void
9542 2 : bdev_histogram_disable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9543 : {
9544 2 : struct spdk_bdev_histogram_ctx *ctx = _ctx;
9545 :
9546 2 : spdk_spin_lock(&ctx->bdev->internal.spinlock);
9547 2 : ctx->bdev->internal.histogram_in_progress = false;
9548 2 : spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9549 2 : ctx->cb_fn(ctx->cb_arg, ctx->status);
9550 2 : free(ctx);
9551 2 : }
9552 :
9553 : static void
9554 3 : bdev_histogram_disable_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9555 : struct spdk_io_channel *_ch, void *_ctx)
9556 : {
9557 3 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9558 :
9559 3 : if (ch->histogram != NULL) {
9560 3 : spdk_histogram_data_free(ch->histogram);
9561 3 : ch->histogram = NULL;
9562 : }
9563 3 : spdk_bdev_for_each_channel_continue(i, 0);
9564 3 : }
9565 :
9566 : static void
9567 2 : bdev_histogram_enable_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9568 : {
9569 2 : struct spdk_bdev_histogram_ctx *ctx = _ctx;
9570 :
9571 2 : if (status != 0) {
9572 0 : ctx->status = status;
9573 0 : ctx->bdev->internal.histogram_enabled = false;
9574 0 : spdk_bdev_for_each_channel(ctx->bdev, bdev_histogram_disable_channel, ctx,
9575 : bdev_histogram_disable_channel_cb);
9576 : } else {
9577 2 : spdk_spin_lock(&ctx->bdev->internal.spinlock);
9578 2 : ctx->bdev->internal.histogram_in_progress = false;
9579 2 : spdk_spin_unlock(&ctx->bdev->internal.spinlock);
9580 2 : ctx->cb_fn(ctx->cb_arg, ctx->status);
9581 2 : free(ctx);
9582 : }
9583 2 : }
9584 :
9585 : static void
9586 3 : bdev_histogram_enable_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9587 : struct spdk_io_channel *_ch, void *_ctx)
9588 : {
9589 3 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9590 3 : int status = 0;
9591 :
9592 3 : if (ch->histogram == NULL) {
9593 3 : ch->histogram = spdk_histogram_data_alloc();
9594 3 : if (ch->histogram == NULL) {
9595 0 : status = -ENOMEM;
9596 : }
9597 : }
9598 :
9599 3 : spdk_bdev_for_each_channel_continue(i, status);
9600 3 : }
9601 :
9602 : void
9603 4 : spdk_bdev_histogram_enable_ext(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
9604 : void *cb_arg, bool enable, struct spdk_bdev_enable_histogram_opts *opts)
9605 : {
9606 : struct spdk_bdev_histogram_ctx *ctx;
9607 :
9608 4 : ctx = calloc(1, sizeof(struct spdk_bdev_histogram_ctx));
9609 4 : if (ctx == NULL) {
9610 0 : cb_fn(cb_arg, -ENOMEM);
9611 0 : return;
9612 : }
9613 :
9614 4 : ctx->bdev = bdev;
9615 4 : ctx->status = 0;
9616 4 : ctx->cb_fn = cb_fn;
9617 4 : ctx->cb_arg = cb_arg;
9618 :
9619 4 : spdk_spin_lock(&bdev->internal.spinlock);
9620 4 : if (bdev->internal.histogram_in_progress) {
9621 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9622 0 : free(ctx);
9623 0 : cb_fn(cb_arg, -EAGAIN);
9624 0 : return;
9625 : }
9626 :
9627 4 : bdev->internal.histogram_in_progress = true;
9628 4 : spdk_spin_unlock(&bdev->internal.spinlock);
9629 :
9630 4 : bdev->internal.histogram_enabled = enable;
9631 4 : bdev->internal.histogram_io_type = opts->io_type;
9632 :
9633 4 : if (enable) {
9634 : /* Allocate histogram for each channel */
9635 2 : spdk_bdev_for_each_channel(bdev, bdev_histogram_enable_channel, ctx,
9636 : bdev_histogram_enable_channel_cb);
9637 : } else {
9638 2 : spdk_bdev_for_each_channel(bdev, bdev_histogram_disable_channel, ctx,
9639 : bdev_histogram_disable_channel_cb);
9640 : }
9641 : }
9642 :
9643 : void
9644 4 : spdk_bdev_enable_histogram_opts_init(struct spdk_bdev_enable_histogram_opts *opts, size_t size)
9645 : {
9646 4 : if (opts == NULL) {
9647 0 : SPDK_ERRLOG("opts should not be NULL\n");
9648 0 : assert(opts != NULL);
9649 0 : return;
9650 : }
9651 4 : if (size == 0) {
9652 0 : SPDK_ERRLOG("size should not be zero\n");
9653 0 : assert(size != 0);
9654 0 : return;
9655 : }
9656 :
9657 4 : memset(opts, 0, size);
9658 4 : opts->size = size;
9659 :
9660 : #define FIELD_OK(field) \
9661 : offsetof(struct spdk_bdev_enable_histogram_opts, field) + sizeof(opts->field) <= size
9662 :
9663 : #define SET_FIELD(field, value) \
9664 : if (FIELD_OK(field)) { \
9665 : opts->field = value; \
9666 : } \
9667 :
9668 4 : SET_FIELD(io_type, 0);
9669 :
9670 : /* You should not remove this statement, but need to update the assert statement
9671 : * if you add a new field, and also add a corresponding SET_FIELD statement */
9672 : SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_enable_histogram_opts) == 9, "Incorrect size");
9673 :
9674 : #undef FIELD_OK
9675 : #undef SET_FIELD
9676 : }
9677 :
9678 : void
9679 4 : spdk_bdev_histogram_enable(struct spdk_bdev *bdev, spdk_bdev_histogram_status_cb cb_fn,
9680 : void *cb_arg, bool enable)
9681 : {
9682 4 : struct spdk_bdev_enable_histogram_opts opts;
9683 :
9684 4 : spdk_bdev_enable_histogram_opts_init(&opts, sizeof(opts));
9685 4 : spdk_bdev_histogram_enable_ext(bdev, cb_fn, cb_arg, enable, &opts);
9686 4 : }
9687 :
9688 : struct spdk_bdev_histogram_data_ctx {
9689 : spdk_bdev_histogram_data_cb cb_fn;
9690 : void *cb_arg;
9691 : struct spdk_bdev *bdev;
9692 : /** merged histogram data from all channels */
9693 : struct spdk_histogram_data *histogram;
9694 : };
9695 :
9696 : static void
9697 5 : bdev_histogram_get_channel_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9698 : {
9699 5 : struct spdk_bdev_histogram_data_ctx *ctx = _ctx;
9700 :
9701 5 : ctx->cb_fn(ctx->cb_arg, status, ctx->histogram);
9702 5 : free(ctx);
9703 5 : }
9704 :
9705 : static void
9706 7 : bdev_histogram_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9707 : struct spdk_io_channel *_ch, void *_ctx)
9708 : {
9709 7 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9710 7 : struct spdk_bdev_histogram_data_ctx *ctx = _ctx;
9711 7 : int status = 0;
9712 :
9713 7 : if (ch->histogram == NULL) {
9714 1 : status = -EFAULT;
9715 : } else {
9716 6 : spdk_histogram_data_merge(ctx->histogram, ch->histogram);
9717 : }
9718 :
9719 7 : spdk_bdev_for_each_channel_continue(i, status);
9720 7 : }
9721 :
9722 : void
9723 5 : spdk_bdev_histogram_get(struct spdk_bdev *bdev, struct spdk_histogram_data *histogram,
9724 : spdk_bdev_histogram_data_cb cb_fn,
9725 : void *cb_arg)
9726 : {
9727 : struct spdk_bdev_histogram_data_ctx *ctx;
9728 :
9729 5 : ctx = calloc(1, sizeof(struct spdk_bdev_histogram_data_ctx));
9730 5 : if (ctx == NULL) {
9731 0 : cb_fn(cb_arg, -ENOMEM, NULL);
9732 0 : return;
9733 : }
9734 :
9735 5 : ctx->bdev = bdev;
9736 5 : ctx->cb_fn = cb_fn;
9737 5 : ctx->cb_arg = cb_arg;
9738 :
9739 5 : ctx->histogram = histogram;
9740 :
9741 5 : spdk_bdev_for_each_channel(bdev, bdev_histogram_get_channel, ctx,
9742 : bdev_histogram_get_channel_cb);
9743 : }
9744 :
9745 : void
9746 2 : spdk_bdev_channel_get_histogram(struct spdk_io_channel *ch, spdk_bdev_histogram_data_cb cb_fn,
9747 : void *cb_arg)
9748 : {
9749 2 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(ch);
9750 2 : int status = 0;
9751 :
9752 2 : assert(cb_fn != NULL);
9753 :
9754 2 : if (bdev_ch->histogram == NULL) {
9755 1 : status = -EFAULT;
9756 : }
9757 2 : cb_fn(cb_arg, status, bdev_ch->histogram);
9758 2 : }
9759 :
9760 : size_t
9761 0 : spdk_bdev_get_media_events(struct spdk_bdev_desc *desc, struct spdk_bdev_media_event *events,
9762 : size_t max_events)
9763 : {
9764 : struct media_event_entry *entry;
9765 0 : size_t num_events = 0;
9766 :
9767 0 : for (; num_events < max_events; ++num_events) {
9768 0 : entry = TAILQ_FIRST(&desc->pending_media_events);
9769 0 : if (entry == NULL) {
9770 0 : break;
9771 : }
9772 :
9773 0 : events[num_events] = entry->event;
9774 0 : TAILQ_REMOVE(&desc->pending_media_events, entry, tailq);
9775 0 : TAILQ_INSERT_TAIL(&desc->free_media_events, entry, tailq);
9776 : }
9777 :
9778 0 : return num_events;
9779 : }
9780 :
9781 : int
9782 0 : spdk_bdev_push_media_events(struct spdk_bdev *bdev, const struct spdk_bdev_media_event *events,
9783 : size_t num_events)
9784 : {
9785 : struct spdk_bdev_desc *desc;
9786 : struct media_event_entry *entry;
9787 : size_t event_id;
9788 0 : int rc = 0;
9789 :
9790 0 : assert(bdev->media_events);
9791 :
9792 0 : spdk_spin_lock(&bdev->internal.spinlock);
9793 0 : TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
9794 0 : if (desc->write) {
9795 0 : break;
9796 : }
9797 : }
9798 :
9799 0 : if (desc == NULL || desc->media_events_buffer == NULL) {
9800 0 : rc = -ENODEV;
9801 0 : goto out;
9802 : }
9803 :
9804 0 : for (event_id = 0; event_id < num_events; ++event_id) {
9805 0 : entry = TAILQ_FIRST(&desc->free_media_events);
9806 0 : if (entry == NULL) {
9807 0 : break;
9808 : }
9809 :
9810 0 : TAILQ_REMOVE(&desc->free_media_events, entry, tailq);
9811 0 : TAILQ_INSERT_TAIL(&desc->pending_media_events, entry, tailq);
9812 0 : entry->event = events[event_id];
9813 : }
9814 :
9815 0 : rc = event_id;
9816 0 : out:
9817 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9818 0 : return rc;
9819 : }
9820 :
9821 : static void
9822 0 : _media_management_notify(void *arg)
9823 : {
9824 0 : struct spdk_bdev_desc *desc = arg;
9825 :
9826 0 : _event_notify(desc, SPDK_BDEV_EVENT_MEDIA_MANAGEMENT);
9827 0 : }
9828 :
9829 : void
9830 0 : spdk_bdev_notify_media_management(struct spdk_bdev *bdev)
9831 : {
9832 : struct spdk_bdev_desc *desc;
9833 :
9834 0 : spdk_spin_lock(&bdev->internal.spinlock);
9835 0 : TAILQ_FOREACH(desc, &bdev->internal.open_descs, link) {
9836 0 : if (!TAILQ_EMPTY(&desc->pending_media_events)) {
9837 0 : event_notify(desc, _media_management_notify);
9838 : }
9839 : }
9840 0 : spdk_spin_unlock(&bdev->internal.spinlock);
9841 0 : }
9842 :
9843 : struct locked_lba_range_ctx {
9844 : struct lba_range range;
9845 : struct lba_range *current_range;
9846 : struct lba_range *owner_range;
9847 : struct spdk_poller *poller;
9848 : lock_range_cb cb_fn;
9849 : void *cb_arg;
9850 : };
9851 :
9852 : static void
9853 0 : bdev_lock_error_cleanup_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9854 : {
9855 0 : struct locked_lba_range_ctx *ctx = _ctx;
9856 :
9857 0 : ctx->cb_fn(&ctx->range, ctx->cb_arg, -ENOMEM);
9858 0 : free(ctx);
9859 0 : }
9860 :
9861 : static void bdev_unlock_lba_range_get_channel(struct spdk_bdev_channel_iter *i,
9862 : struct spdk_bdev *bdev, struct spdk_io_channel *ch, void *_ctx);
9863 :
9864 : static void
9865 14 : bdev_lock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
9866 : {
9867 14 : struct locked_lba_range_ctx *ctx = _ctx;
9868 :
9869 14 : if (status == -ENOMEM) {
9870 : /* One of the channels could not allocate a range object.
9871 : * So we have to go back and clean up any ranges that were
9872 : * allocated successfully before we return error status to
9873 : * the caller. We can reuse the unlock function to do that
9874 : * clean up.
9875 : */
9876 0 : spdk_bdev_for_each_channel(bdev, bdev_unlock_lba_range_get_channel, ctx,
9877 : bdev_lock_error_cleanup_cb);
9878 0 : return;
9879 : }
9880 :
9881 : /* All channels have locked this range and no I/O overlapping the range
9882 : * are outstanding! Set the owner_ch for the range object for the
9883 : * locking channel, so that this channel will know that it is allowed
9884 : * to write to this range.
9885 : */
9886 14 : if (ctx->owner_range != NULL) {
9887 10 : ctx->owner_range->owner_ch = ctx->range.owner_ch;
9888 : }
9889 :
9890 14 : ctx->cb_fn(&ctx->range, ctx->cb_arg, status);
9891 :
9892 : /* Don't free the ctx here. Its range is in the bdev's global list of
9893 : * locked ranges still, and will be removed and freed when this range
9894 : * is later unlocked.
9895 : */
9896 : }
9897 :
9898 : static int
9899 17 : bdev_lock_lba_range_check_io(void *_i)
9900 : {
9901 17 : struct spdk_bdev_channel_iter *i = _i;
9902 17 : struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i->i);
9903 17 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9904 17 : struct locked_lba_range_ctx *ctx = i->ctx;
9905 17 : struct lba_range *range = ctx->current_range;
9906 : struct spdk_bdev_io *bdev_io;
9907 :
9908 17 : spdk_poller_unregister(&ctx->poller);
9909 :
9910 : /* The range is now in the locked_ranges, so no new IO can be submitted to this
9911 : * range. But we need to wait until any outstanding IO overlapping with this range
9912 : * are completed.
9913 : */
9914 18 : TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
9915 3 : if (bdev_io_range_is_locked(bdev_io, range)) {
9916 2 : ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
9917 2 : return SPDK_POLLER_BUSY;
9918 : }
9919 : }
9920 :
9921 15 : spdk_bdev_for_each_channel_continue(i, 0);
9922 15 : return SPDK_POLLER_BUSY;
9923 : }
9924 :
9925 : static void
9926 15 : bdev_lock_lba_range_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
9927 : struct spdk_io_channel *_ch, void *_ctx)
9928 : {
9929 15 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
9930 15 : struct locked_lba_range_ctx *ctx = _ctx;
9931 : struct lba_range *range;
9932 :
9933 16 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
9934 1 : if (range->length == ctx->range.length &&
9935 0 : range->offset == ctx->range.offset &&
9936 0 : range->locked_ctx == ctx->range.locked_ctx) {
9937 : /* This range already exists on this channel, so don't add
9938 : * it again. This can happen when a new channel is created
9939 : * while the for_each_channel operation is in progress.
9940 : * Do not check for outstanding I/O in that case, since the
9941 : * range was locked before any I/O could be submitted to the
9942 : * new channel.
9943 : */
9944 0 : spdk_bdev_for_each_channel_continue(i, 0);
9945 0 : return;
9946 : }
9947 : }
9948 :
9949 15 : range = calloc(1, sizeof(*range));
9950 15 : if (range == NULL) {
9951 0 : spdk_bdev_for_each_channel_continue(i, -ENOMEM);
9952 0 : return;
9953 : }
9954 :
9955 15 : range->length = ctx->range.length;
9956 15 : range->offset = ctx->range.offset;
9957 15 : range->locked_ctx = ctx->range.locked_ctx;
9958 15 : range->quiesce = ctx->range.quiesce;
9959 15 : ctx->current_range = range;
9960 15 : if (ctx->range.owner_ch == ch) {
9961 : /* This is the range object for the channel that will hold
9962 : * the lock. Store it in the ctx object so that we can easily
9963 : * set its owner_ch after the lock is finally acquired.
9964 : */
9965 10 : ctx->owner_range = range;
9966 : }
9967 15 : TAILQ_INSERT_TAIL(&ch->locked_ranges, range, tailq);
9968 15 : bdev_lock_lba_range_check_io(i);
9969 : }
9970 :
9971 : static void
9972 14 : bdev_lock_lba_range_ctx(struct spdk_bdev *bdev, struct locked_lba_range_ctx *ctx)
9973 : {
9974 14 : assert(spdk_get_thread() == ctx->range.owner_thread);
9975 14 : assert(ctx->range.owner_ch == NULL ||
9976 : spdk_io_channel_get_thread(ctx->range.owner_ch->channel) == ctx->range.owner_thread);
9977 :
9978 : /* We will add a copy of this range to each channel now. */
9979 14 : spdk_bdev_for_each_channel(bdev, bdev_lock_lba_range_get_channel, ctx,
9980 : bdev_lock_lba_range_cb);
9981 14 : }
9982 :
9983 : static bool
9984 17 : bdev_lba_range_overlaps_tailq(struct lba_range *range, lba_range_tailq_t *tailq)
9985 : {
9986 : struct lba_range *r;
9987 :
9988 18 : TAILQ_FOREACH(r, tailq, tailq) {
9989 4 : if (bdev_lba_range_overlapped(range, r)) {
9990 3 : return true;
9991 : }
9992 : }
9993 14 : return false;
9994 : }
9995 :
9996 : static void bdev_quiesce_range_locked(struct lba_range *range, void *ctx, int status);
9997 :
9998 : static int
9999 14 : _bdev_lock_lba_range(struct spdk_bdev *bdev, struct spdk_bdev_channel *ch,
10000 : uint64_t offset, uint64_t length,
10001 : lock_range_cb cb_fn, void *cb_arg)
10002 : {
10003 : struct locked_lba_range_ctx *ctx;
10004 :
10005 14 : ctx = calloc(1, sizeof(*ctx));
10006 14 : if (ctx == NULL) {
10007 0 : return -ENOMEM;
10008 : }
10009 :
10010 14 : ctx->range.offset = offset;
10011 14 : ctx->range.length = length;
10012 14 : ctx->range.owner_thread = spdk_get_thread();
10013 14 : ctx->range.owner_ch = ch;
10014 14 : ctx->range.locked_ctx = cb_arg;
10015 14 : ctx->range.bdev = bdev;
10016 14 : ctx->range.quiesce = (cb_fn == bdev_quiesce_range_locked);
10017 14 : ctx->cb_fn = cb_fn;
10018 14 : ctx->cb_arg = cb_arg;
10019 :
10020 14 : spdk_spin_lock(&bdev->internal.spinlock);
10021 14 : if (bdev_lba_range_overlaps_tailq(&ctx->range, &bdev->internal.locked_ranges)) {
10022 : /* There is an active lock overlapping with this range.
10023 : * Put it on the pending list until this range no
10024 : * longer overlaps with another.
10025 : */
10026 2 : TAILQ_INSERT_TAIL(&bdev->internal.pending_locked_ranges, &ctx->range, tailq);
10027 : } else {
10028 12 : TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, &ctx->range, tailq);
10029 12 : bdev_lock_lba_range_ctx(bdev, ctx);
10030 : }
10031 14 : spdk_spin_unlock(&bdev->internal.spinlock);
10032 14 : return 0;
10033 : }
10034 :
10035 : static int
10036 10 : bdev_lock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
10037 : uint64_t offset, uint64_t length,
10038 : lock_range_cb cb_fn, void *cb_arg)
10039 : {
10040 10 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10041 10 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10042 :
10043 10 : if (cb_arg == NULL) {
10044 0 : SPDK_ERRLOG("cb_arg must not be NULL\n");
10045 0 : return -EINVAL;
10046 : }
10047 :
10048 10 : return _bdev_lock_lba_range(bdev, ch, offset, length, cb_fn, cb_arg);
10049 : }
10050 :
10051 : static void
10052 2 : bdev_lock_lba_range_ctx_msg(void *_ctx)
10053 : {
10054 2 : struct locked_lba_range_ctx *ctx = _ctx;
10055 :
10056 2 : bdev_lock_lba_range_ctx(ctx->range.bdev, ctx);
10057 2 : }
10058 :
10059 : static void
10060 14 : bdev_unlock_lba_range_cb(struct spdk_bdev *bdev, void *_ctx, int status)
10061 : {
10062 14 : struct locked_lba_range_ctx *ctx = _ctx;
10063 : struct locked_lba_range_ctx *pending_ctx;
10064 : struct lba_range *range, *tmp;
10065 :
10066 14 : spdk_spin_lock(&bdev->internal.spinlock);
10067 : /* Check if there are any pending locked ranges that overlap with this range
10068 : * that was just unlocked. If there are, check that it doesn't overlap with any
10069 : * other locked ranges before calling bdev_lock_lba_range_ctx which will start
10070 : * the lock process.
10071 : */
10072 17 : TAILQ_FOREACH_SAFE(range, &bdev->internal.pending_locked_ranges, tailq, tmp) {
10073 3 : if (bdev_lba_range_overlapped(range, &ctx->range) &&
10074 3 : !bdev_lba_range_overlaps_tailq(range, &bdev->internal.locked_ranges)) {
10075 2 : TAILQ_REMOVE(&bdev->internal.pending_locked_ranges, range, tailq);
10076 2 : pending_ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
10077 2 : TAILQ_INSERT_TAIL(&bdev->internal.locked_ranges, range, tailq);
10078 2 : spdk_thread_send_msg(pending_ctx->range.owner_thread,
10079 : bdev_lock_lba_range_ctx_msg, pending_ctx);
10080 : }
10081 : }
10082 14 : spdk_spin_unlock(&bdev->internal.spinlock);
10083 :
10084 14 : ctx->cb_fn(&ctx->range, ctx->cb_arg, status);
10085 14 : free(ctx);
10086 14 : }
10087 :
10088 : static void
10089 16 : bdev_unlock_lba_range_get_channel(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10090 : struct spdk_io_channel *_ch, void *_ctx)
10091 : {
10092 16 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10093 16 : struct locked_lba_range_ctx *ctx = _ctx;
10094 16 : TAILQ_HEAD(, spdk_bdev_io) io_locked;
10095 : struct spdk_bdev_io *bdev_io;
10096 : struct lba_range *range;
10097 :
10098 16 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10099 16 : if (ctx->range.offset == range->offset &&
10100 16 : ctx->range.length == range->length &&
10101 16 : ctx->range.locked_ctx == range->locked_ctx) {
10102 16 : TAILQ_REMOVE(&ch->locked_ranges, range, tailq);
10103 16 : free(range);
10104 16 : break;
10105 : }
10106 : }
10107 :
10108 : /* Note: we should almost always be able to assert that the range specified
10109 : * was found. But there are some very rare corner cases where a new channel
10110 : * gets created simultaneously with a range unlock, where this function
10111 : * would execute on that new channel and wouldn't have the range.
10112 : * We also use this to clean up range allocations when a later allocation
10113 : * fails in the locking path.
10114 : * So we can't actually assert() here.
10115 : */
10116 :
10117 : /* Swap the locked IO into a temporary list, and then try to submit them again.
10118 : * We could hyper-optimize this to only resubmit locked I/O that overlap
10119 : * with the range that was just unlocked, but this isn't a performance path so
10120 : * we go for simplicity here.
10121 : */
10122 16 : TAILQ_INIT(&io_locked);
10123 16 : TAILQ_SWAP(&ch->io_locked, &io_locked, spdk_bdev_io, internal.ch_link);
10124 19 : while (!TAILQ_EMPTY(&io_locked)) {
10125 3 : bdev_io = TAILQ_FIRST(&io_locked);
10126 3 : TAILQ_REMOVE(&io_locked, bdev_io, internal.ch_link);
10127 3 : bdev_io_submit(bdev_io);
10128 : }
10129 :
10130 16 : spdk_bdev_for_each_channel_continue(i, 0);
10131 16 : }
10132 :
10133 : static int
10134 14 : _bdev_unlock_lba_range(struct spdk_bdev *bdev, uint64_t offset, uint64_t length,
10135 : lock_range_cb cb_fn, void *cb_arg)
10136 : {
10137 : struct locked_lba_range_ctx *ctx;
10138 : struct lba_range *range;
10139 :
10140 14 : spdk_spin_lock(&bdev->internal.spinlock);
10141 : /* To start the unlock the process, we find the range in the bdev's locked_ranges
10142 : * and remove it. This ensures new channels don't inherit the locked range.
10143 : * Then we will send a message to each channel to remove the range from its
10144 : * per-channel list.
10145 : */
10146 14 : TAILQ_FOREACH(range, &bdev->internal.locked_ranges, tailq) {
10147 14 : if (range->offset == offset && range->length == length &&
10148 14 : (range->owner_ch == NULL || range->locked_ctx == cb_arg)) {
10149 : break;
10150 : }
10151 : }
10152 14 : if (range == NULL) {
10153 0 : assert(false);
10154 : spdk_spin_unlock(&bdev->internal.spinlock);
10155 : return -EINVAL;
10156 : }
10157 14 : TAILQ_REMOVE(&bdev->internal.locked_ranges, range, tailq);
10158 14 : ctx = SPDK_CONTAINEROF(range, struct locked_lba_range_ctx, range);
10159 14 : spdk_spin_unlock(&bdev->internal.spinlock);
10160 :
10161 14 : ctx->cb_fn = cb_fn;
10162 14 : ctx->cb_arg = cb_arg;
10163 :
10164 14 : spdk_bdev_for_each_channel(bdev, bdev_unlock_lba_range_get_channel, ctx,
10165 : bdev_unlock_lba_range_cb);
10166 14 : return 0;
10167 : }
10168 :
10169 : static int
10170 12 : bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
10171 : uint64_t offset, uint64_t length,
10172 : lock_range_cb cb_fn, void *cb_arg)
10173 : {
10174 12 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10175 12 : struct spdk_bdev_channel *ch = __io_ch_to_bdev_ch(_ch);
10176 : struct lba_range *range;
10177 12 : bool range_found = false;
10178 :
10179 : /* Let's make sure the specified channel actually has a lock on
10180 : * the specified range. Note that the range must match exactly.
10181 : */
10182 14 : TAILQ_FOREACH(range, &ch->locked_ranges, tailq) {
10183 12 : if (range->offset == offset && range->length == length &&
10184 11 : range->owner_ch == ch && range->locked_ctx == cb_arg) {
10185 10 : range_found = true;
10186 10 : break;
10187 : }
10188 : }
10189 :
10190 12 : if (!range_found) {
10191 2 : return -EINVAL;
10192 : }
10193 :
10194 10 : return _bdev_unlock_lba_range(bdev, offset, length, cb_fn, cb_arg);
10195 : }
10196 :
10197 : struct bdev_quiesce_ctx {
10198 : spdk_bdev_quiesce_cb cb_fn;
10199 : void *cb_arg;
10200 : };
10201 :
10202 : static void
10203 4 : bdev_unquiesce_range_unlocked(struct lba_range *range, void *ctx, int status)
10204 : {
10205 4 : struct bdev_quiesce_ctx *quiesce_ctx = ctx;
10206 :
10207 4 : if (quiesce_ctx->cb_fn != NULL) {
10208 4 : quiesce_ctx->cb_fn(quiesce_ctx->cb_arg, status);
10209 : }
10210 :
10211 4 : free(quiesce_ctx);
10212 4 : }
10213 :
10214 : static void
10215 4 : bdev_quiesce_range_locked(struct lba_range *range, void *ctx, int status)
10216 : {
10217 4 : struct bdev_quiesce_ctx *quiesce_ctx = ctx;
10218 4 : struct spdk_bdev_module *module = range->bdev->module;
10219 :
10220 4 : if (status != 0) {
10221 0 : if (quiesce_ctx->cb_fn != NULL) {
10222 0 : quiesce_ctx->cb_fn(quiesce_ctx->cb_arg, status);
10223 : }
10224 0 : free(quiesce_ctx);
10225 0 : return;
10226 : }
10227 :
10228 4 : spdk_spin_lock(&module->internal.spinlock);
10229 4 : TAILQ_INSERT_TAIL(&module->internal.quiesced_ranges, range, tailq_module);
10230 4 : spdk_spin_unlock(&module->internal.spinlock);
10231 :
10232 4 : if (quiesce_ctx->cb_fn != NULL) {
10233 : /* copy the context in case the range is unlocked by the callback */
10234 4 : struct bdev_quiesce_ctx tmp = *quiesce_ctx;
10235 :
10236 4 : quiesce_ctx->cb_fn = NULL;
10237 4 : quiesce_ctx->cb_arg = NULL;
10238 :
10239 4 : tmp.cb_fn(tmp.cb_arg, status);
10240 : }
10241 : /* quiesce_ctx will be freed on unquiesce */
10242 : }
10243 :
10244 : static int
10245 9 : _spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10246 : uint64_t offset, uint64_t length,
10247 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg,
10248 : bool unquiesce)
10249 : {
10250 : struct bdev_quiesce_ctx *quiesce_ctx;
10251 : int rc;
10252 :
10253 9 : if (module != bdev->module) {
10254 0 : SPDK_ERRLOG("Bdev does not belong to specified module.\n");
10255 0 : return -EINVAL;
10256 : }
10257 :
10258 9 : if (!bdev_io_valid_blocks(bdev, offset, length)) {
10259 0 : return -EINVAL;
10260 : }
10261 :
10262 9 : if (unquiesce) {
10263 : struct lba_range *range;
10264 :
10265 : /* Make sure the specified range is actually quiesced in the specified module and
10266 : * then remove it from the list. Note that the range must match exactly.
10267 : */
10268 5 : spdk_spin_lock(&module->internal.spinlock);
10269 6 : TAILQ_FOREACH(range, &module->internal.quiesced_ranges, tailq_module) {
10270 5 : if (range->bdev == bdev && range->offset == offset && range->length == length) {
10271 4 : TAILQ_REMOVE(&module->internal.quiesced_ranges, range, tailq_module);
10272 4 : break;
10273 : }
10274 : }
10275 5 : spdk_spin_unlock(&module->internal.spinlock);
10276 :
10277 5 : if (range == NULL) {
10278 1 : SPDK_ERRLOG("The range to unquiesce was not found.\n");
10279 1 : return -EINVAL;
10280 : }
10281 :
10282 4 : quiesce_ctx = range->locked_ctx;
10283 4 : quiesce_ctx->cb_fn = cb_fn;
10284 4 : quiesce_ctx->cb_arg = cb_arg;
10285 :
10286 4 : rc = _bdev_unlock_lba_range(bdev, offset, length, bdev_unquiesce_range_unlocked, quiesce_ctx);
10287 : } else {
10288 4 : quiesce_ctx = malloc(sizeof(*quiesce_ctx));
10289 4 : if (quiesce_ctx == NULL) {
10290 0 : return -ENOMEM;
10291 : }
10292 :
10293 4 : quiesce_ctx->cb_fn = cb_fn;
10294 4 : quiesce_ctx->cb_arg = cb_arg;
10295 :
10296 4 : rc = _bdev_lock_lba_range(bdev, NULL, offset, length, bdev_quiesce_range_locked, quiesce_ctx);
10297 4 : if (rc != 0) {
10298 0 : free(quiesce_ctx);
10299 : }
10300 : }
10301 :
10302 8 : return rc;
10303 : }
10304 :
10305 : int
10306 3 : spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10307 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10308 : {
10309 3 : return _spdk_bdev_quiesce(bdev, module, 0, bdev->blockcnt, cb_fn, cb_arg, false);
10310 : }
10311 :
10312 : int
10313 3 : spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10314 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10315 : {
10316 3 : return _spdk_bdev_quiesce(bdev, module, 0, bdev->blockcnt, cb_fn, cb_arg, true);
10317 : }
10318 :
10319 : int
10320 1 : spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10321 : uint64_t offset, uint64_t length,
10322 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10323 : {
10324 1 : return _spdk_bdev_quiesce(bdev, module, offset, length, cb_fn, cb_arg, false);
10325 : }
10326 :
10327 : int
10328 2 : spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
10329 : uint64_t offset, uint64_t length,
10330 : spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
10331 : {
10332 2 : return _spdk_bdev_quiesce(bdev, module, offset, length, cb_fn, cb_arg, true);
10333 : }
10334 :
10335 : int
10336 279 : spdk_bdev_get_memory_domains(struct spdk_bdev *bdev, struct spdk_memory_domain **domains,
10337 : int array_size)
10338 : {
10339 279 : if (!bdev) {
10340 1 : return -EINVAL;
10341 : }
10342 :
10343 278 : if (bdev->fn_table->get_memory_domains) {
10344 3 : return bdev->fn_table->get_memory_domains(bdev->ctxt, domains, array_size);
10345 : }
10346 :
10347 275 : return 0;
10348 : }
10349 :
10350 : struct spdk_bdev_for_each_io_ctx {
10351 : void *ctx;
10352 : spdk_bdev_io_fn fn;
10353 : spdk_bdev_for_each_io_cb cb;
10354 : };
10355 :
10356 : static void
10357 0 : bdev_channel_for_each_io(struct spdk_bdev_channel_iter *i, struct spdk_bdev *bdev,
10358 : struct spdk_io_channel *io_ch, void *_ctx)
10359 : {
10360 0 : struct spdk_bdev_for_each_io_ctx *ctx = _ctx;
10361 0 : struct spdk_bdev_channel *bdev_ch = __io_ch_to_bdev_ch(io_ch);
10362 : struct spdk_bdev_io *bdev_io;
10363 0 : int rc = 0;
10364 :
10365 0 : TAILQ_FOREACH(bdev_io, &bdev_ch->io_submitted, internal.ch_link) {
10366 0 : rc = ctx->fn(ctx->ctx, bdev_io);
10367 0 : if (rc != 0) {
10368 0 : break;
10369 : }
10370 : }
10371 :
10372 0 : spdk_bdev_for_each_channel_continue(i, rc);
10373 0 : }
10374 :
10375 : static void
10376 0 : bdev_for_each_io_done(struct spdk_bdev *bdev, void *_ctx, int status)
10377 : {
10378 0 : struct spdk_bdev_for_each_io_ctx *ctx = _ctx;
10379 :
10380 0 : ctx->cb(ctx->ctx, status);
10381 :
10382 0 : free(ctx);
10383 0 : }
10384 :
10385 : void
10386 0 : spdk_bdev_for_each_bdev_io(struct spdk_bdev *bdev, void *_ctx, spdk_bdev_io_fn fn,
10387 : spdk_bdev_for_each_io_cb cb)
10388 : {
10389 : struct spdk_bdev_for_each_io_ctx *ctx;
10390 :
10391 0 : assert(fn != NULL && cb != NULL);
10392 :
10393 0 : ctx = calloc(1, sizeof(*ctx));
10394 0 : if (ctx == NULL) {
10395 0 : SPDK_ERRLOG("Failed to allocate context.\n");
10396 0 : cb(_ctx, -ENOMEM);
10397 0 : return;
10398 : }
10399 :
10400 0 : ctx->ctx = _ctx;
10401 0 : ctx->fn = fn;
10402 0 : ctx->cb = cb;
10403 :
10404 0 : spdk_bdev_for_each_channel(bdev, bdev_channel_for_each_io, ctx,
10405 : bdev_for_each_io_done);
10406 : }
10407 :
10408 : void
10409 135 : spdk_bdev_for_each_channel_continue(struct spdk_bdev_channel_iter *iter, int status)
10410 : {
10411 135 : spdk_for_each_channel_continue(iter->i, status);
10412 135 : }
10413 :
10414 : static struct spdk_bdev *
10415 369 : io_channel_iter_get_bdev(struct spdk_io_channel_iter *i)
10416 : {
10417 369 : void *io_device = spdk_io_channel_iter_get_io_device(i);
10418 :
10419 369 : return __bdev_from_io_dev(io_device);
10420 : }
10421 :
10422 : static void
10423 135 : bdev_each_channel_msg(struct spdk_io_channel_iter *i)
10424 : {
10425 135 : struct spdk_bdev_channel_iter *iter = spdk_io_channel_iter_get_ctx(i);
10426 135 : struct spdk_bdev *bdev = io_channel_iter_get_bdev(i);
10427 135 : struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
10428 :
10429 135 : iter->i = i;
10430 135 : iter->fn(iter, bdev, ch, iter->ctx);
10431 135 : }
10432 :
10433 : static void
10434 234 : bdev_each_channel_cpl(struct spdk_io_channel_iter *i, int status)
10435 : {
10436 234 : struct spdk_bdev_channel_iter *iter = spdk_io_channel_iter_get_ctx(i);
10437 234 : struct spdk_bdev *bdev = io_channel_iter_get_bdev(i);
10438 :
10439 234 : iter->i = i;
10440 234 : iter->cpl(bdev, iter->ctx, status);
10441 :
10442 234 : free(iter);
10443 234 : }
10444 :
10445 : void
10446 234 : spdk_bdev_for_each_channel(struct spdk_bdev *bdev, spdk_bdev_for_each_channel_msg fn,
10447 : void *ctx, spdk_bdev_for_each_channel_done cpl)
10448 : {
10449 : struct spdk_bdev_channel_iter *iter;
10450 :
10451 234 : assert(bdev != NULL && fn != NULL && ctx != NULL);
10452 :
10453 234 : iter = calloc(1, sizeof(struct spdk_bdev_channel_iter));
10454 234 : if (iter == NULL) {
10455 0 : SPDK_ERRLOG("Unable to allocate iterator\n");
10456 0 : assert(false);
10457 : return;
10458 : }
10459 :
10460 234 : iter->fn = fn;
10461 234 : iter->cpl = cpl;
10462 234 : iter->ctx = ctx;
10463 :
10464 234 : spdk_for_each_channel(__bdev_to_io_dev(bdev), bdev_each_channel_msg,
10465 : iter, bdev_each_channel_cpl);
10466 : }
10467 :
10468 : static void
10469 3 : bdev_copy_do_write_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
10470 : {
10471 3 : struct spdk_bdev_io *parent_io = cb_arg;
10472 :
10473 3 : spdk_bdev_free_io(bdev_io);
10474 :
10475 : /* Check return status of write */
10476 3 : parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
10477 3 : parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
10478 3 : }
10479 :
10480 : static void
10481 3 : bdev_copy_do_write(void *_bdev_io)
10482 : {
10483 3 : struct spdk_bdev_io *bdev_io = _bdev_io;
10484 : int rc;
10485 :
10486 : /* Write blocks */
10487 3 : rc = spdk_bdev_write_blocks_with_md(bdev_io->internal.desc,
10488 3 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
10489 3 : bdev_io->u.bdev.iovs[0].iov_base,
10490 : bdev_io->u.bdev.md_buf, bdev_io->u.bdev.offset_blocks,
10491 : bdev_io->u.bdev.num_blocks, bdev_copy_do_write_done, bdev_io);
10492 :
10493 3 : if (rc == -ENOMEM) {
10494 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_write);
10495 3 : } else if (rc != 0) {
10496 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10497 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10498 : }
10499 3 : }
10500 :
10501 : static void
10502 3 : bdev_copy_do_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
10503 : {
10504 3 : struct spdk_bdev_io *parent_io = cb_arg;
10505 :
10506 3 : spdk_bdev_free_io(bdev_io);
10507 :
10508 : /* Check return status of read */
10509 3 : if (!success) {
10510 0 : parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10511 0 : parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
10512 0 : return;
10513 : }
10514 :
10515 : /* Do write */
10516 3 : bdev_copy_do_write(parent_io);
10517 : }
10518 :
10519 : static void
10520 3 : bdev_copy_do_read(void *_bdev_io)
10521 : {
10522 3 : struct spdk_bdev_io *bdev_io = _bdev_io;
10523 : int rc;
10524 :
10525 : /* Read blocks */
10526 3 : rc = spdk_bdev_read_blocks_with_md(bdev_io->internal.desc,
10527 3 : spdk_io_channel_from_ctx(bdev_io->internal.ch),
10528 3 : bdev_io->u.bdev.iovs[0].iov_base,
10529 : bdev_io->u.bdev.md_buf, bdev_io->u.bdev.copy.src_offset_blocks,
10530 : bdev_io->u.bdev.num_blocks, bdev_copy_do_read_done, bdev_io);
10531 :
10532 3 : if (rc == -ENOMEM) {
10533 0 : bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_read);
10534 3 : } else if (rc != 0) {
10535 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10536 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10537 : }
10538 3 : }
10539 :
10540 : static void
10541 3 : bdev_copy_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
10542 : {
10543 3 : if (!success) {
10544 0 : bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
10545 0 : bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
10546 0 : return;
10547 : }
10548 :
10549 3 : bdev_copy_do_read(bdev_io);
10550 : }
10551 :
10552 : int
10553 27 : spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
10554 : uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
10555 : spdk_bdev_io_completion_cb cb, void *cb_arg)
10556 : {
10557 27 : struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
10558 : struct spdk_bdev_io *bdev_io;
10559 27 : struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
10560 :
10561 27 : if (!desc->write) {
10562 0 : return -EBADF;
10563 : }
10564 :
10565 27 : if (!bdev_io_valid_blocks(bdev, dst_offset_blocks, num_blocks) ||
10566 27 : !bdev_io_valid_blocks(bdev, src_offset_blocks, num_blocks)) {
10567 0 : SPDK_DEBUGLOG(bdev,
10568 : "Invalid offset or number of blocks: dst %lu, src %lu, count %lu\n",
10569 : dst_offset_blocks, src_offset_blocks, num_blocks);
10570 0 : return -EINVAL;
10571 : }
10572 :
10573 27 : bdev_io = bdev_channel_get_io(channel);
10574 27 : if (!bdev_io) {
10575 0 : return -ENOMEM;
10576 : }
10577 :
10578 27 : bdev_io->internal.ch = channel;
10579 27 : bdev_io->internal.desc = desc;
10580 27 : bdev_io->type = SPDK_BDEV_IO_TYPE_COPY;
10581 :
10582 27 : bdev_io->u.bdev.offset_blocks = dst_offset_blocks;
10583 27 : bdev_io->u.bdev.copy.src_offset_blocks = src_offset_blocks;
10584 27 : bdev_io->u.bdev.num_blocks = num_blocks;
10585 27 : bdev_io->u.bdev.memory_domain = NULL;
10586 27 : bdev_io->u.bdev.memory_domain_ctx = NULL;
10587 27 : bdev_io->u.bdev.iovs = NULL;
10588 27 : bdev_io->u.bdev.iovcnt = 0;
10589 27 : bdev_io->u.bdev.md_buf = NULL;
10590 27 : bdev_io->u.bdev.accel_sequence = NULL;
10591 27 : bdev_io_init(bdev_io, bdev, cb_arg, cb);
10592 :
10593 27 : if (dst_offset_blocks == src_offset_blocks || num_blocks == 0) {
10594 0 : spdk_thread_send_msg(spdk_get_thread(), bdev_io_complete_cb, bdev_io);
10595 0 : return 0;
10596 : }
10597 :
10598 :
10599 : /* If the copy size is large and should be split, use the generic split logic
10600 : * regardless of whether SPDK_BDEV_IO_TYPE_COPY is supported or not.
10601 : *
10602 : * Then, send the copy request if SPDK_BDEV_IO_TYPE_COPY is supported or
10603 : * emulate it using regular read and write requests otherwise.
10604 : */
10605 27 : if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY) ||
10606 : bdev_io->internal.f.split) {
10607 24 : bdev_io_submit(bdev_io);
10608 24 : return 0;
10609 : }
10610 :
10611 3 : spdk_bdev_io_get_buf(bdev_io, bdev_copy_get_buf_cb, num_blocks * spdk_bdev_get_block_size(bdev));
10612 :
10613 3 : return 0;
10614 : }
10615 :
10616 3 : SPDK_LOG_REGISTER_COMPONENT(bdev)
10617 :
10618 : static void
10619 0 : bdev_trace(void)
10620 : {
10621 0 : struct spdk_trace_tpoint_opts opts[] = {
10622 : {
10623 : "BDEV_IO_START", TRACE_BDEV_IO_START,
10624 : OWNER_TYPE_BDEV, OBJECT_BDEV_IO, 1,
10625 : {
10626 : { "type", SPDK_TRACE_ARG_TYPE_INT, 8 },
10627 : { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
10628 : { "offset", SPDK_TRACE_ARG_TYPE_INT, 8 },
10629 : { "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
10630 : }
10631 : },
10632 : {
10633 : "BDEV_IO_DONE", TRACE_BDEV_IO_DONE,
10634 : OWNER_TYPE_BDEV, OBJECT_BDEV_IO, 0,
10635 : {
10636 : { "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
10637 : { "qd", SPDK_TRACE_ARG_TYPE_INT, 4 }
10638 : }
10639 : },
10640 : {
10641 : "BDEV_IOCH_CREATE", TRACE_BDEV_IOCH_CREATE,
10642 : OWNER_TYPE_BDEV, OBJECT_NONE, 0,
10643 : {
10644 : { "tid", SPDK_TRACE_ARG_TYPE_INT, 8 }
10645 : }
10646 : },
10647 : {
10648 : "BDEV_IOCH_DESTROY", TRACE_BDEV_IOCH_DESTROY,
10649 : OWNER_TYPE_BDEV, OBJECT_NONE, 0,
10650 : {
10651 : { "tid", SPDK_TRACE_ARG_TYPE_INT, 8 }
10652 : }
10653 : },
10654 : };
10655 :
10656 :
10657 0 : spdk_trace_register_owner_type(OWNER_TYPE_BDEV, 'b');
10658 0 : spdk_trace_register_object(OBJECT_BDEV_IO, 'i');
10659 0 : spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
10660 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_START, OBJECT_BDEV_IO, 0);
10661 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_NVME_IO_DONE, OBJECT_BDEV_IO, 0);
10662 0 : spdk_trace_tpoint_register_relation(TRACE_BLOB_REQ_SET_START, OBJECT_BDEV_IO, 0);
10663 0 : spdk_trace_tpoint_register_relation(TRACE_BLOB_REQ_SET_COMPLETE, OBJECT_BDEV_IO, 0);
10664 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_RAID_IO_START, OBJECT_BDEV_IO, 0);
10665 0 : spdk_trace_tpoint_register_relation(TRACE_BDEV_RAID_IO_DONE, OBJECT_BDEV_IO, 0);
10666 0 : }
10667 3 : SPDK_TRACE_REGISTER_FN(bdev_trace, "bdev", TRACE_GROUP_BDEV)
|