Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (C) 2017 Intel Corporation. All rights reserved.
3 : : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : : */
6 : :
7 : : #include "spdk_internal/cunit.h"
8 : :
9 : : #include "common/lib/ut_multithread.c"
10 : : #include "unit/lib/json_mock.c"
11 : :
12 : : #include "spdk/config.h"
13 : : /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 : : #undef SPDK_CONFIG_VTUNE
15 : :
16 : : #include "bdev/bdev.c"
17 : :
/* Stubbed SPDK dependencies for linking bdev.c in isolation.
 * DEFINE_STUB(name, ret, args, val) generates a function that returns val;
 * DEFINE_STUB_V generates a void no-op.  Accel/memory-domain entry points are
 * stubbed because this UT never exercises real hardware offload paths. */
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_dif_verify_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
	     struct iovec *dst_iovs, size_t dst_iovcnt,
	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, size_t src_iovcnt,
	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
	     uint32_t num_blocks,
	     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_dif_generate_copy, int,
	    (struct spdk_accel_sequence **seq,
	     struct spdk_io_channel *ch,
	     struct iovec *dst_iovs, size_t dst_iovcnt,
	     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, size_t src_iovcnt,
	     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
	     uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
	     spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
DEFINE_STUB(spdk_accel_get_buf, int, (struct spdk_io_channel *ch, uint64_t len, void **buf,
				      struct spdk_memory_domain **domain, void **domain_ctx), 0);
DEFINE_STUB_V(spdk_accel_put_buf, (struct spdk_io_channel *ch, void *buf,
				   struct spdk_memory_domain *domain, void *domain_ctx));
56 : :
/* Set by the memory-domain mocks below so tests can verify a pull/push happened. */
static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
/* Dummy io_device handle used to register the fake accel channel. */
static int g_accel_io_device;
60 : :
DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
/* Mock pull: records the call, honors an armed mock return value (which makes
 * the call fail/complete asynchronously per the test's setup), otherwise
 * completes the transfer immediately with success via cpl_cb. */
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	/* Returns the mocked value here if one was set via MOCK_SET(). */
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
72 : :
DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
/* Mock push: mirror image of the pull mock above — records the call, honors an
 * armed mock return value, otherwise completes immediately with success. */
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	/* Returns the mocked value here if one was set via MOCK_SET(). */
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
84 : :
/* Hand out a channel on the dummy accel io_device registered in ut_bdev_setup(). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}
90 : :
/* Shared scratch state inspected by test callbacks. */
int g_status;
int g_count;
/* Last event type delivered to each of the four bdev_open_cbN() callbacks. */
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
/* Captured by bdev_unregister_cb() for the unregister tests. */
void *g_unregister_arg;
int g_unregister_rc;
100 : :
/* No-op stub; SCSI/NVMe sense translation is irrelevant to these tests. */
void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}
106 : :
/* Channel-create callback for the dummy accel io_device; no per-channel state. */
static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}
112 : :
/* Channel-destroy callback for the dummy accel io_device; nothing to tear down. */
static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}
117 : :
/* CUnit suite setup: register the dummy accel io_device once per suite. */
static int
ut_bdev_setup(void)
{
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}
125 : :
/* CUnit suite teardown: undo ut_bdev_setup(). */
static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}
133 : :
/* bdev destruct callback; bdev memory is owned/freed by the tests themselves. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
139 : :
/* Description of an I/O the test expects the stub backend to receive next.
 * Queued on bdev_ut_channel.expected_io and checked in stub_submit_request(). */
struct ut_expected_io {
	uint8_t type;		/* SPDK_BDEV_IO_TYPE_*; INVALID skips the type check */
	uint64_t offset;	/* expected offset_blocks */
	uint64_t src_offset;	/* expected copy source offset (COPY only) */
	uint64_t length;	/* expected num_blocks; 0 skips offset/length checks */
	int iovcnt;		/* expected iov count; 0 skips iov checks */
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;		/* expected metadata buffer, or NULL to skip */
	TAILQ_ENTRY(ut_expected_io) link;
};
150 : :
/* Per-I/O driver context; lives in bdev_io->driver_ctx (see vbdev_ut_get_ctx_size). */
struct bdev_ut_io {
	TAILQ_ENTRY(bdev_ut_io) link;
};

/* Per-channel state of the stub backend. */
struct bdev_ut_channel {
	TAILQ_HEAD(, bdev_ut_io) outstanding_io;	/* submitted, not yet completed */
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;	/* expectations to verify */
};
160 : :
/* Global knobs and capture points shared between the tests and the stub backend. */
static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;			/* last I/O seen by stub_submit_request() */
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
/* Buffers used by the read/write/compare verification paths. */
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
/* Buffers handed out/checked by the zcopy start/end paths. */
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
/* Offsets reported by the SEEK_DATA/SEEK_HOLE stubs and captured by bdev_seek_cb(). */
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;
182 : :
183 : : static struct ut_expected_io *
184 : 1028 : ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
185 : : {
186 : : struct ut_expected_io *expected_io;
187 : :
188 : 1028 : expected_io = calloc(1, sizeof(*expected_io));
189 [ + + ]: 1028 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
190 : :
191 : 1028 : expected_io->type = type;
192 : 1028 : expected_io->offset = offset;
193 : 1028 : expected_io->length = length;
194 : 1028 : expected_io->iovcnt = iovcnt;
195 : :
196 : 1028 : return expected_io;
197 : : }
198 : :
199 : : static struct ut_expected_io *
200 : 84 : ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
201 : : {
202 : : struct ut_expected_io *expected_io;
203 : :
204 : 84 : expected_io = calloc(1, sizeof(*expected_io));
205 [ + + ]: 84 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
206 : :
207 : 84 : expected_io->type = type;
208 : 84 : expected_io->offset = offset;
209 : 84 : expected_io->src_offset = src_offset;
210 : 84 : expected_io->length = length;
211 : :
212 : 84 : return expected_io;
213 : : }
214 : :
215 : : static void
216 : 2184 : ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
217 : : {
218 : 2184 : expected_io->iov[pos].iov_base = base;
219 : 2184 : expected_io->iov[pos].iov_len = len;
220 : 2184 : }
221 : :
/* The stub backend's submit_request callback.  Performs type-specific fake
 * processing (compare-buffer copies, abort handling, zcopy buffer plumbing,
 * seek results), queues the I/O as outstanding, then verifies it against the
 * next queued ut_expected_io (if any). */
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	struct bdev_ut_io *bio;
	int i;

	g_bdev_io = bdev_io;

	/* READ with a compare buffer armed: serve the read from g_compare_read_buf. */
	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		/* Also fill separate metadata if the bdev has md and the test armed it. */
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	/* WRITE with a capture buffer armed: copy the written data out for checking. */
	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	/* COMPARE: diff against g_compare_read_buf (and md); on mismatch arrange for
	 * the completion status to be MISCOMPARE. */
	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	/* ABORT: find the target I/O among the outstanding ones and fail it.  Only
	 * done when the expected status is SUCCESS (i.e. the abort should land). */
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
				bio_to_abort = spdk_bdev_io_from_ctx(bio);
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio, link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	/* ZCOPY: on start, hand out the test-provided buffer; on end, verify the
	 * same buffer comes back and clear the bookkeeping. */
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	/* SEEK: report the canned offsets the test configured. */
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	/* Park the I/O as outstanding; stub_complete_io() finishes it later. */
	TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link);
	ch->outstanding_io_count++;

	/* Verify against the next expectation, if the test queued one. */
	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	/* length == 0 means the expectation only covers type/md. */
	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.f.has_bounce_buf == false) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			/* With a bounce buffer, compare against the caller's original iovs. */
			iov = bdev_io->internal.bounce_buf.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}
376 : :
/* get_buf completion: the buffer must have been allocated; then submit normally. */
static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}
385 : :
/* Alternate submit path that exercises spdk_bdev_io_get_buf() before submission. */
static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
392 : :
393 : : static uint32_t
394 : 692 : stub_complete_io(uint32_t num_to_complete)
395 : : {
396 : 692 : struct bdev_ut_channel *ch = g_bdev_ut_channel;
397 : : struct bdev_ut_io *bio;
398 : : struct spdk_bdev_io *bdev_io;
399 : : static enum spdk_bdev_io_status io_status;
400 : 692 : uint32_t num_completed = 0;
401 : :
402 [ + + ]: 2260 : while (num_completed < num_to_complete) {
403 [ + + ]: 1580 : if (TAILQ_EMPTY(&ch->outstanding_io)) {
404 : 12 : break;
405 : : }
406 : 1568 : bio = TAILQ_FIRST(&ch->outstanding_io);
407 [ + + ]: 1568 : TAILQ_REMOVE(&ch->outstanding_io, bio, link);
408 : 1568 : bdev_io = spdk_bdev_io_from_ctx(bio);
409 : 1568 : ch->outstanding_io_count--;
410 [ + + ]: 1568 : io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
411 : 12 : g_io_exp_status;
412 : 1568 : spdk_bdev_io_complete(bdev_io, io_status);
413 : 1568 : num_completed++;
414 : : }
415 : :
416 : 692 : return num_completed;
417 : : }
418 : :
419 : : static struct spdk_io_channel *
420 : 144 : bdev_ut_get_io_channel(void *ctx)
421 : : {
422 : 144 : return spdk_get_io_channel(&g_bdev_ut_io_device);
423 : : }
424 : :
425 : : static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
426 : : [SPDK_BDEV_IO_TYPE_READ] = true,
427 : : [SPDK_BDEV_IO_TYPE_WRITE] = true,
428 : : [SPDK_BDEV_IO_TYPE_COMPARE] = true,
429 : : [SPDK_BDEV_IO_TYPE_UNMAP] = true,
430 : : [SPDK_BDEV_IO_TYPE_FLUSH] = true,
431 : : [SPDK_BDEV_IO_TYPE_RESET] = true,
432 : : [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
433 : : [SPDK_BDEV_IO_TYPE_NVME_IO] = true,
434 : : [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
435 : : [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
436 : : [SPDK_BDEV_IO_TYPE_ZCOPY] = true,
437 : : [SPDK_BDEV_IO_TYPE_ABORT] = true,
438 : : [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
439 : : [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
440 : : [SPDK_BDEV_IO_TYPE_COPY] = true,
441 : : };
442 : :
/* Enable/disable one I/O type in the stub backend's support table. */
static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}
448 : :
/* io_type_supported callback: defer to the global table. */
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}
454 : :
/* Function table shared by every bdev/vbdev this UT creates. */
static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
461 : :
/* Channel-create for the stub backend io_device.  The tests assume a single
 * channel at a time, published through g_bdev_ut_channel. */
static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}
475 : :
/* Channel-destroy counterpart: clear the single published channel. */
static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}
482 : :
struct spdk_bdev_module bdev_ut_if;

/* Module init: register the stub backend io_device and signal async init done
 * (the module is declared with .async_init = true). */
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}
493 : :
/* Module fini: undo bdev_ut_module_init(). */
static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}
499 : :
/* The base bdev module under which test bdevs are registered. */
struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
506 : :
static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

/* Virtual-bdev module init: nothing to set up. */
static int
vbdev_ut_module_init(void)
{
	return 0;
}
515 : :
/* Virtual-bdev module fini: nothing to tear down. */
static void
vbdev_ut_module_fini(void)
{
}
520 : :
/* Size of per-I/O driver context; gives every bdev_io a bdev_ut_io for queuing. */
static int
vbdev_ut_get_ctx_size(void)
{
	return sizeof(struct bdev_ut_io);
}
526 : :
/* Virtual-bdev module: provides examine callbacks and the I/O context size. */
struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
	.get_ctx_size = vbdev_ut_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
538 : :
/* Optional per-bdev context (bdev->ctxt) letting tests hook and count the
 * examine_config/examine_disk callbacks. */
struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};
545 : :
/* examine_config callback: invoke/count the test hook (if the bdev carries a
 * ut_examine_ctx), then report examine done as the API requires. */
static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
560 : :
/* examine_disk callback: same shape as vbdev_ut_examine_config() above. */
static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
575 : :
/* spdk_bdev_initialize() completion: initialization must succeed. */
static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}
581 : :
/* No-op completion for spdk_bdev_finish()/spdk_iobuf_finish(). */
static void
bdev_fini_cb(void *arg)
{
}
586 : :
/* Per-test bdev layer bring-up: optionally apply bdev opts, initialize the
 * iobuf pool and the bdev subsystem, then drain the init events. */
static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}
601 : :
/* Per-test bdev layer teardown: mirror of ut_init_bdev(). */
static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}
609 : :
/* Create and register a base bdev (1024 blocks x 512 bytes) under bdev_ut_if,
 * with an optional test context.  Caller owns the bdev; free with free_bdev().
 * NOTE: `name` is stored by pointer, so it must outlive the bdev. */
static struct spdk_bdev *
allocate_bdev_ctx(char *name, void *ctx)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->ctxt = ctx;
	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}
634 : :
/* Convenience wrapper: base bdev with no test context. */
static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}
640 : :
/* Create and register a virtual bdev under vbdev_ut_if.  Unlike
 * allocate_bdev_ctx(), no context is attached and no UUID is generated.
 * Caller owns the bdev; free with free_vbdev(). */
static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}
662 : :
/* Unregister a bdev and release its memory.  The struct is poisoned with 0xFF
 * before free() so any lingering reference trips obviously. */
static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}
671 : :
/* Unregister a virtual bdev and release its memory.  The teardown sequence is
 * identical to free_bdev() (unregister, drain, poison, free), so delegate to
 * it instead of duplicating the four statements. */
static void
free_vbdev(struct spdk_bdev *bdev)
{
	free_bdev(bdev);
}
680 : :
/* Completion for spdk_bdev_get_device_stat(): verify the bdev, free the stat
 * buffer (ownership transferred here), and flag the caller's `done` bool. */
static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}
695 : :
/* Unregister completion: capture arg and rc for the unregister tests. */
static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}
702 : :
/* No-op event callback for opens that don't care about events. */
static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}
707 : :
/* Event callback #1: record the event type and close the descriptor on REMOVE.
 * event_ctx points at the test's desc pointer variable. */
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}
718 : :
/* Event callback #2: same as bdev_open_cb1() but records g_event_type2. */
static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}
729 : :
/* Event callback #3: record only; descriptor is closed by the test itself. */
static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}
735 : :
/* Event callback #4: record only; descriptor is closed by the test itself. */
static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}
741 : :
/* Seek completion: capture the resulting offset and free the I/O. */
static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}
748 : :
/* Verify spdk_bdev_get_device_stat() invokes its callback with the right bdev.
 * The stat buffer is freed by get_device_stat_cb(). */
static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, SPDK_BDEV_RESET_STAT_NONE, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}
769 : :
770 : : static void
771 : 4 : open_write_test(void)
772 : : {
773 : : struct spdk_bdev *bdev[9];
774 : 4 : struct spdk_bdev_desc *desc[9] = {};
775 : : int rc;
776 : :
777 : 4 : ut_init_bdev(NULL);
778 : :
779 : : /*
780 : : * Create a tree of bdevs to test various open w/ write cases.
781 : : *
782 : : * bdev0 through bdev3 are physical block devices, such as NVMe
783 : : * namespaces or Ceph block devices.
784 : : *
785 : : * bdev4 is a virtual bdev with multiple base bdevs. This models
786 : : * caching or RAID use cases.
787 : : *
788 : : * bdev5 through bdev7 are all virtual bdevs with the same base
789 : : * bdev (except bdev7). This models partitioning or logical volume
790 : : * use cases.
791 : : *
792 : : * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
793 : : * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
794 : : * models caching, RAID, partitioning or logical volumes use cases.
795 : : *
796 : : * bdev8 is a virtual bdev with multiple base bdevs, but these
797 : : * base bdevs are themselves virtual bdevs.
798 : : *
799 : : * bdev8
800 : : * |
801 : : * +----------+
802 : : * | |
803 : : * bdev4 bdev5 bdev6 bdev7
804 : : * | | | |
805 : : * +---+---+ +---+ + +---+---+
806 : : * | | \ | / \
807 : : * bdev0 bdev1 bdev2 bdev3
808 : : */
809 : :
810 : 4 : bdev[0] = allocate_bdev("bdev0");
811 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
812 : 4 : CU_ASSERT(rc == 0);
813 : :
814 : 4 : bdev[1] = allocate_bdev("bdev1");
815 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
816 : 4 : CU_ASSERT(rc == 0);
817 : :
818 : 4 : bdev[2] = allocate_bdev("bdev2");
819 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
820 : 4 : CU_ASSERT(rc == 0);
821 : :
822 : 4 : bdev[3] = allocate_bdev("bdev3");
823 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
824 : 4 : CU_ASSERT(rc == 0);
825 : :
826 : 4 : bdev[4] = allocate_vbdev("bdev4");
827 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
828 : 4 : CU_ASSERT(rc == 0);
829 : :
830 : 4 : bdev[5] = allocate_vbdev("bdev5");
831 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
832 : 4 : CU_ASSERT(rc == 0);
833 : :
834 : 4 : bdev[6] = allocate_vbdev("bdev6");
835 : :
836 : 4 : bdev[7] = allocate_vbdev("bdev7");
837 : :
838 : 4 : bdev[8] = allocate_vbdev("bdev8");
839 : :
840 : : /* Open bdev0 read-only. This should succeed. */
841 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
842 : 4 : CU_ASSERT(rc == 0);
843 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
844 : 4 : CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
845 : 4 : spdk_bdev_close(desc[0]);
846 : :
847 : : /*
848 : : * Open bdev1 read/write. This should fail since bdev1 has been claimed
849 : : * by a vbdev module.
850 : : */
851 : 4 : rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
852 : 4 : CU_ASSERT(rc == -EPERM);
853 : :
854 : : /*
855 : : * Open bdev4 read/write. This should fail since bdev3 has been claimed
856 : : * by a vbdev module.
857 : : */
858 : 4 : rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
859 : 4 : CU_ASSERT(rc == -EPERM);
860 : :
861 : : /* Open bdev4 read-only. This should succeed. */
862 : 4 : rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
863 : 4 : CU_ASSERT(rc == 0);
864 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
865 : 4 : CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
866 : 4 : spdk_bdev_close(desc[4]);
867 : :
868 : : /*
869 : : * Open bdev8 read/write. This should succeed since it is a leaf
870 : : * bdev.
871 : : */
872 : 4 : rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
873 : 4 : CU_ASSERT(rc == 0);
874 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
875 : 4 : CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
876 : 4 : spdk_bdev_close(desc[8]);
877 : :
878 : : /*
879 : : * Open bdev5 read/write. This should fail since bdev4 has been claimed
880 : : * by a vbdev module.
881 : : */
882 : 4 : rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
883 : 4 : CU_ASSERT(rc == -EPERM);
884 : :
885 : : /* Open bdev4 read-only. This should succeed. */
886 : 4 : rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
887 : 4 : CU_ASSERT(rc == 0);
888 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
889 : 4 : CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
890 : 4 : spdk_bdev_close(desc[5]);
891 : :
892 : 4 : free_vbdev(bdev[8]);
893 : :
894 : 4 : free_vbdev(bdev[5]);
895 : 4 : free_vbdev(bdev[6]);
896 : 4 : free_vbdev(bdev[7]);
897 : :
898 : 4 : free_vbdev(bdev[4]);
899 : :
900 : 4 : free_bdev(bdev[0]);
901 : 4 : free_bdev(bdev[1]);
902 : 4 : free_bdev(bdev[2]);
903 : 4 : free_bdev(bdev[3]);
904 : :
905 : 4 : ut_fini_bdev();
906 : 4 : }
907 : :
908 : : static void
909 : 4 : claim_test(void)
910 : : {
911 : : struct spdk_bdev *bdev;
912 : 3 : struct spdk_bdev_desc *desc, *open_desc;
913 : : int rc;
914 : : uint32_t count;
915 : :
916 : 4 : ut_init_bdev(NULL);
917 : :
918 : : /*
919 : : * A vbdev that uses a read-only bdev may need it to remain read-only.
920 : : * To do so, it opens the bdev read-only, then claims it without
921 : : * passing a spdk_bdev_desc.
922 : : */
923 : 4 : bdev = allocate_bdev("bdev0");
924 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
925 : 4 : CU_ASSERT(rc == 0);
926 [ - + ]: 4 : CU_ASSERT(desc->write == false);
927 : :
928 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
929 : 4 : CU_ASSERT(rc == 0);
930 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
931 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);
932 : :
933 : : /* There should be only one open descriptor and it should still be ro */
934 : 4 : count = 0;
935 [ + + ]: 8 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
936 : 4 : CU_ASSERT(open_desc == desc);
937 [ - + ]: 4 : CU_ASSERT(!open_desc->write);
938 : 4 : count++;
939 : 1 : }
940 : 4 : CU_ASSERT(count == 1);
941 : :
942 : : /* A read-only bdev is upgraded to read-write if desc is passed. */
943 : 4 : spdk_bdev_module_release_bdev(bdev);
944 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
945 : 4 : CU_ASSERT(rc == 0);
946 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
947 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);
948 : :
949 : : /* There should be only one open descriptor and it should be rw */
950 : 4 : count = 0;
951 [ + + ]: 8 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
952 : 4 : CU_ASSERT(open_desc == desc);
953 [ - + ]: 4 : CU_ASSERT(open_desc->write);
954 : 4 : count++;
955 : 1 : }
956 : 4 : CU_ASSERT(count == 1);
957 : :
958 : 4 : spdk_bdev_close(desc);
959 : 4 : free_bdev(bdev);
960 : 4 : ut_fini_bdev();
961 : 4 : }
962 : :
963 : : static void
964 : 4 : bytes_to_blocks_test(void)
965 : : {
966 : 4 : struct spdk_bdev_desc desc = {0};
967 : 4 : struct spdk_bdev bdev = {0};
968 : 3 : uint64_t offset_blocks, num_blocks;
969 : :
970 : :
971 : 4 : desc.bdev = &bdev;
972 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
973 : :
974 : 4 : bdev.blocklen = 512;
975 : :
976 : : /* All parameters valid */
977 : 4 : offset_blocks = 0;
978 : 4 : num_blocks = 0;
979 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&desc, 512, &offset_blocks, 1024, &num_blocks) == 0);
980 : 4 : CU_ASSERT(offset_blocks == 1);
981 : 4 : CU_ASSERT(num_blocks == 2);
982 : :
983 : : /* Offset not a block multiple */
984 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&desc, 3, &offset_blocks, 512, &num_blocks) != 0);
985 : :
986 : : /* Length not a block multiple */
987 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&desc, 512, &offset_blocks, 3, &num_blocks) != 0);
988 : :
989 : : /* In case blocklen not the power of two */
990 : 4 : bdev.blocklen = 100;
991 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&desc, 100, &offset_blocks, 200, &num_blocks) == 0);
992 : 4 : CU_ASSERT(offset_blocks == 1);
993 : 4 : CU_ASSERT(num_blocks == 2);
994 : :
995 : : /* Offset not a block multiple */
996 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&desc, 3, &offset_blocks, 100, &num_blocks) != 0);
997 : :
998 : : /* Length not a block multiple */
999 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&desc, 100, &offset_blocks, 3, &num_blocks) != 0);
1000 : 4 : }
1001 : :
/*
 * Verify spdk_bdev_notify_blockcnt_change(): resizing is unrestricted while
 * the bdev has no open descriptors, shrinking is rejected once a descriptor
 * is open, and resize events are delivered (asynchronously) only to open
 * descriptors.
 */
static void
num_blocks_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("num_blocks");

	spdk_bdev_notify_blockcnt_change(bdev, 50);

	/* Growing the block count always succeeds */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
	/* Shrinking is allowed while there are no open descriptors */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing still succeeds with a descriptor open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
	/* Shrinking must fail while a descriptor is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);

	/* The resize event reaches the open callback only after polling */
	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing before the event is delivered */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
1049 : :
1050 : : static void
1051 : 4 : io_valid_test(void)
1052 : : {
1053 : 3 : struct spdk_bdev bdev;
1054 : :
1055 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
1056 : :
1057 : 4 : bdev.blocklen = 512;
1058 : 4 : spdk_spin_init(&bdev.internal.spinlock);
1059 : :
1060 : 4 : spdk_bdev_notify_blockcnt_change(&bdev, 100);
1061 : :
1062 : : /* All parameters valid */
1063 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
1064 : :
1065 : : /* Last valid block */
1066 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);
1067 : :
1068 : : /* Offset past end of bdev */
1069 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);
1070 : :
1071 : : /* Offset + length past end of bdev */
1072 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);
1073 : :
1074 : : /* Offset near end of uint64_t range (2^64 - 1) */
1075 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
1076 : :
1077 : 4 : spdk_spin_destroy(&bdev.internal.spinlock);
1078 : 4 : }
1079 : :
/*
 * Exercise the bdev alias API: adding (duplicate of the name, NULL alias,
 * cross-bdev collisions), deleting (missing alias, attempting to delete the
 * bdev name), and del-all on both empty and populated alias lists.
 */
static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	ut_init_bdev(NULL);

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the bdev's own name.
	 * Names and aliases share one namespace, so this must fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try adding a NULL alias;
	 * this must fail with -EINVAL.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's alias list, so this should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's alias list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's alias list, so this should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias must fail: a bdev's name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Deleting all aliases from an empty alias list is a harmless no-op */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Deleting all aliases from a non-empty list leaves it empty */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);

	ut_fini_bdev();
}
1171 : :
1172 : : static void
1173 : 596 : io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1174 : : {
1175 : 596 : g_io_done = true;
1176 : 596 : g_io_status = bdev_io->internal.status;
1177 [ + + + + ]: 596 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
1178 : 4 : (bdev_io->u.bdev.zcopy.start)) {
1179 : 8 : g_zcopy_bdev_io = bdev_io;
1180 : 2 : } else {
1181 : 588 : spdk_bdev_free_io(bdev_io);
1182 : 588 : g_zcopy_bdev_io = NULL;
1183 : : }
1184 : 596 : }
1185 : :
/* Per-test context wrapping an spdk_bdev_io_wait_entry for io_wait_cb(). */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;	/* wait entry handed to spdk_bdev_queue_io_wait() */
	struct spdk_io_channel *io_ch;		/* channel on which io_wait_cb() retries the read */
	struct spdk_bdev_desc *desc;		/* descriptor used for the retried read */
	bool submitted;				/* set by io_wait_cb() once the retry was submitted */
};
1192 : :
1193 : : static void
1194 : 8 : io_wait_cb(void *arg)
1195 : : {
1196 : 8 : struct bdev_ut_io_wait_entry *entry = arg;
1197 : : int rc;
1198 : :
1199 : 8 : rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
1200 : 8 : CU_ASSERT(rc == 0);
1201 : 8 : entry->submitted = true;
1202 : 8 : }
1203 : :
1204 : : static void
1205 : 4 : bdev_io_types_test(void)
1206 : : {
1207 : : struct spdk_bdev *bdev;
1208 : 4 : struct spdk_bdev_desc *desc = NULL;
1209 : : struct spdk_io_channel *io_ch;
1210 : 4 : struct spdk_bdev_opts bdev_opts = {};
1211 : : int rc;
1212 : :
1213 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1214 : 4 : bdev_opts.bdev_io_pool_size = 4;
1215 : 4 : bdev_opts.bdev_io_cache_size = 2;
1216 : 4 : ut_init_bdev(&bdev_opts);
1217 : :
1218 : 4 : bdev = allocate_bdev("bdev0");
1219 : :
1220 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1221 : 4 : CU_ASSERT(rc == 0);
1222 : 4 : poll_threads();
1223 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1224 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1225 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1226 : 4 : CU_ASSERT(io_ch != NULL);
1227 : :
1228 : : /* WRITE and WRITE ZEROES are not supported */
1229 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
1230 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
1231 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
1232 : 4 : CU_ASSERT(rc == -ENOTSUP);
1233 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
1234 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
1235 : :
1236 : : /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
1237 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
1238 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
1239 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
1240 : 4 : rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1241 : 4 : CU_ASSERT(rc == -ENOTSUP);
1242 : 4 : rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
1243 : 4 : CU_ASSERT(rc == -ENOTSUP);
1244 : 4 : rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1245 : 4 : CU_ASSERT(rc == -ENOTSUP);
1246 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
1247 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
1248 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);
1249 : :
1250 : 4 : spdk_put_io_channel(io_ch);
1251 : 4 : spdk_bdev_close(desc);
1252 : 4 : free_bdev(bdev);
1253 : 4 : ut_fini_bdev();
1254 : 4 : }
1255 : :
/*
 * Exhaust the bdev_io pool (sized to 4) so a fifth read fails with -ENOMEM,
 * then verify that spdk_bdev_queue_io_wait() fires the queued callbacks in
 * order as completions return bdev_ios to the pool.
 */
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	/* Shrink the bdev_io pool so it is trivial to exhaust */
	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Consume the whole pool with four outstanding reads */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* A fifth read cannot get a bdev_io from the empty pool */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* First completion frees one bdev_io; only the first waiter resubmits */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	/* Second completion lets the second waiter resubmit as well */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	/* Drain the remaining outstanding I/O */
	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
1330 : :
/*
 * Verify bdev_io_should_split() decisions for each split trigger in turn:
 * optimal_io_boundary crossings, max_num_segments, max_segment_size, and
 * write-unit size/alignment when split_on_write_unit is set.
 */
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and max_size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross and exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Two iovecs with a one-segment limit: exceeds max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* First iovec is larger than max_segment_size: exceeds max_sizes */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}
1412 : :
1413 : : static void
1414 : 4 : bdev_io_boundary_split_test(void)
1415 : : {
1416 : : struct spdk_bdev *bdev;
1417 : 4 : struct spdk_bdev_desc *desc = NULL;
1418 : : struct spdk_io_channel *io_ch;
1419 : 4 : struct spdk_bdev_opts bdev_opts = {};
1420 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
1421 : : struct ut_expected_io *expected_io;
1422 : 4 : void *md_buf = (void *)0xFF000000;
1423 : : uint64_t i;
1424 : : int rc;
1425 : :
1426 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1427 : 4 : bdev_opts.bdev_io_pool_size = 512;
1428 : 4 : bdev_opts.bdev_io_cache_size = 64;
1429 : 4 : ut_init_bdev(&bdev_opts);
1430 : :
1431 : 4 : bdev = allocate_bdev("bdev0");
1432 : :
1433 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1434 : 4 : CU_ASSERT(rc == 0);
1435 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1436 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1437 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1438 : 4 : CU_ASSERT(io_ch != NULL);
1439 : :
1440 : 4 : bdev->optimal_io_boundary = 16;
1441 : 4 : bdev->split_on_optimal_io_boundary = false;
1442 : :
1443 : 4 : g_io_done = false;
1444 : :
1445 : : /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
1446 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
1447 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
1448 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1449 : :
1450 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1451 : 4 : CU_ASSERT(rc == 0);
1452 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1453 : :
1454 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1455 : 4 : stub_complete_io(1);
1456 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1457 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1458 : :
1459 : 4 : bdev->split_on_optimal_io_boundary = true;
1460 : 4 : bdev->md_interleave = false;
1461 : 4 : bdev->md_len = 8;
1462 : :
1463 : : /* Now test that a single-vector command is split correctly.
1464 : : * Offset 14, length 8, payload 0xF000
1465 : : * Child - Offset 14, length 2, payload 0xF000
1466 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1467 : : *
1468 : : * Set up the expected values before calling spdk_bdev_read_blocks
1469 : : */
1470 : 4 : g_io_done = false;
1471 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
1472 : 4 : expected_io->md_buf = md_buf;
1473 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1474 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1475 : :
1476 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1477 : 4 : expected_io->md_buf = md_buf + 2 * 8;
1478 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1479 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1480 : :
1481 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
1482 : 4 : rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
1483 : : 14, 8, io_done, NULL);
1484 : 4 : CU_ASSERT(rc == 0);
1485 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1486 : :
1487 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1488 : 4 : stub_complete_io(2);
1489 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1490 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1491 : :
1492 : : /* Now set up a more complex, multi-vector command that needs to be split,
1493 : : * including splitting iovecs.
1494 : : */
1495 : 4 : iov[0].iov_base = (void *)0x10000;
1496 : 4 : iov[0].iov_len = 512;
1497 : 4 : iov[1].iov_base = (void *)0x20000;
1498 : 4 : iov[1].iov_len = 20 * 512;
1499 : 4 : iov[2].iov_base = (void *)0x30000;
1500 : 4 : iov[2].iov_len = 11 * 512;
1501 : :
1502 : 4 : g_io_done = false;
1503 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1504 : 4 : expected_io->md_buf = md_buf;
1505 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1506 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1507 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1508 : :
1509 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1510 : 4 : expected_io->md_buf = md_buf + 2 * 8;
1511 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1512 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1513 : :
1514 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1515 : 4 : expected_io->md_buf = md_buf + 18 * 8;
1516 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1517 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1518 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1519 : :
1520 : 4 : rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
1521 : : 14, 32, io_done, NULL);
1522 : 4 : CU_ASSERT(rc == 0);
1523 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1524 : :
1525 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
1526 : 4 : stub_complete_io(3);
1527 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1528 : :
1529 : : /* Test multi vector command that needs to be split by strip and then needs to be
1530 : : * split further due to the capacity of child iovs.
1531 : : */
1532 [ + + ]: 260 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
1533 : 256 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1534 : 256 : iov[i].iov_len = 512;
1535 : 64 : }
1536 : :
1537 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1538 : 4 : g_io_done = false;
1539 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1540 : : SPDK_BDEV_IO_NUM_CHILD_IOV);
1541 : 4 : expected_io->md_buf = md_buf;
1542 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1543 : 128 : ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
1544 : 32 : }
1545 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1546 : :
1547 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1548 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
1549 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1550 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1551 : 160 : ut_expected_io_set_iov(expected_io, i,
1552 : 128 : (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1553 : 32 : }
1554 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1555 : :
1556 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1557 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1558 : 4 : CU_ASSERT(rc == 0);
1559 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1560 : :
1561 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1562 : 4 : stub_complete_io(1);
1563 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1564 : :
1565 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1566 : 4 : stub_complete_io(1);
1567 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1568 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1569 : :
1570 : : /* Test multi vector command that needs to be split by strip and then needs to be
1571 : : * split further due to the capacity of child iovs. In this case, the length of
1572 : : * the rest of iovec array with an I/O boundary is the multiple of block size.
1573 : : */
1574 : :
1575 : : /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1576 : : * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1577 : : */
1578 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1579 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1580 : 120 : iov[i].iov_len = 512;
1581 : 30 : }
1582 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1583 : 8 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1584 : 8 : iov[i].iov_len = 256;
1585 : 2 : }
1586 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1587 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1588 : :
1589 : : /* Add an extra iovec to trigger split */
1590 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1591 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1592 : :
1593 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1594 : 4 : g_io_done = false;
1595 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1596 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
1597 : 4 : expected_io->md_buf = md_buf;
1598 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1599 : 150 : ut_expected_io_set_iov(expected_io, i,
1600 : 120 : (void *)((i + 1) * 0x10000), 512);
1601 : 30 : }
1602 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1603 : 10 : ut_expected_io_set_iov(expected_io, i,
1604 : 8 : (void *)((i + 1) * 0x10000), 256);
1605 : 2 : }
1606 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1607 : :
1608 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1609 : : 1, 1);
1610 : 4 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1611 : 4 : ut_expected_io_set_iov(expected_io, 0,
1612 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1613 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1614 : :
1615 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1616 : : 1, 1);
1617 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1618 : 4 : ut_expected_io_set_iov(expected_io, 0,
1619 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1620 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1621 : :
1622 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
1623 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1624 : 4 : CU_ASSERT(rc == 0);
1625 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1626 : :
1627 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1628 : 4 : stub_complete_io(1);
1629 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1630 : :
1631 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1632 : 4 : stub_complete_io(2);
1633 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1634 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1635 : :
1636 : : /* Test multi vector command that needs to be split by strip and then needs to be
1637 : : * split further due to the capacity of child iovs, the child request offset should
1638 : : * be rewind to last aligned offset and go success without error.
1639 : : */
1640 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1641 : 124 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1642 : 124 : iov[i].iov_len = 512;
1643 : 31 : }
1644 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1645 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1646 : :
1647 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1648 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1649 : :
1650 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1651 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1652 : :
1653 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1654 : 4 : g_io_done = false;
1655 : 4 : g_io_status = 0;
1656 : : /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
1657 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1658 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1659 : 4 : expected_io->md_buf = md_buf;
1660 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1661 : 155 : ut_expected_io_set_iov(expected_io, i,
1662 : 124 : (void *)((i + 1) * 0x10000), 512);
1663 : 31 : }
1664 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1665 : : /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
1666 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1667 : : 1, 2);
1668 : 4 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1669 : 4 : ut_expected_io_set_iov(expected_io, 0,
1670 : : (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
1671 : 4 : ut_expected_io_set_iov(expected_io, 1,
1672 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
1673 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1674 : : /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
1675 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1676 : : 1, 1);
1677 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1678 : 4 : ut_expected_io_set_iov(expected_io, 0,
1679 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1680 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1681 : :
1682 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1683 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1684 : 4 : CU_ASSERT(rc == 0);
1685 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1686 : :
1687 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1688 : 4 : stub_complete_io(1);
1689 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1690 : :
1691 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1692 : 4 : stub_complete_io(2);
1693 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1694 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1695 : :
1696 : : /* Test multi vector command that needs to be split due to the IO boundary and
1697 : : * the capacity of child iovs. Especially test the case when the command is
1698 : : * split due to the capacity of child iovs, the tail address is not aligned with
1699 : : * the block size and is rewound to the aligned address.
1700 : : *
1701 : : * The iovecs used in the read request are complex but are based on the data
1702 : : * collected in the real issue. We change the base addresses but keep the lengths
1703 : : * so as not to lose the credibility of the test.
1704 : : */
1705 : 4 : bdev->optimal_io_boundary = 128;
1706 : 4 : g_io_done = false;
1707 : 4 : g_io_status = 0;
1708 : :
1709 [ + + ]: 128 : for (i = 0; i < 31; i++) {
1710 : 124 : iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
1711 : 124 : iov[i].iov_len = 1024;
1712 : 31 : }
1713 : 4 : iov[31].iov_base = (void *)0xFEED1F00000;
1714 : 4 : iov[31].iov_len = 32768;
1715 : 4 : iov[32].iov_base = (void *)0xFEED2000000;
1716 : 4 : iov[32].iov_len = 160;
1717 : 4 : iov[33].iov_base = (void *)0xFEED2100000;
1718 : 4 : iov[33].iov_len = 4096;
1719 : 4 : iov[34].iov_base = (void *)0xFEED2200000;
1720 : 4 : iov[34].iov_len = 4096;
1721 : 4 : iov[35].iov_base = (void *)0xFEED2300000;
1722 : 4 : iov[35].iov_len = 4096;
1723 : 4 : iov[36].iov_base = (void *)0xFEED2400000;
1724 : 4 : iov[36].iov_len = 4096;
1725 : 4 : iov[37].iov_base = (void *)0xFEED2500000;
1726 : 4 : iov[37].iov_len = 4096;
1727 : 4 : iov[38].iov_base = (void *)0xFEED2600000;
1728 : 4 : iov[38].iov_len = 4096;
1729 : 4 : iov[39].iov_base = (void *)0xFEED2700000;
1730 : 4 : iov[39].iov_len = 4096;
1731 : 4 : iov[40].iov_base = (void *)0xFEED2800000;
1732 : 4 : iov[40].iov_len = 4096;
1733 : 4 : iov[41].iov_base = (void *)0xFEED2900000;
1734 : 4 : iov[41].iov_len = 4096;
1735 : 4 : iov[42].iov_base = (void *)0xFEED2A00000;
1736 : 4 : iov[42].iov_len = 4096;
1737 : 4 : iov[43].iov_base = (void *)0xFEED2B00000;
1738 : 4 : iov[43].iov_len = 12288;
1739 : 4 : iov[44].iov_base = (void *)0xFEED2C00000;
1740 : 4 : iov[44].iov_len = 8192;
1741 : 4 : iov[45].iov_base = (void *)0xFEED2F00000;
1742 : 4 : iov[45].iov_len = 4096;
1743 : 4 : iov[46].iov_base = (void *)0xFEED3000000;
1744 : 4 : iov[46].iov_len = 4096;
1745 : 4 : iov[47].iov_base = (void *)0xFEED3100000;
1746 : 4 : iov[47].iov_len = 4096;
1747 : 4 : iov[48].iov_base = (void *)0xFEED3200000;
1748 : 4 : iov[48].iov_len = 24576;
1749 : 4 : iov[49].iov_base = (void *)0xFEED3300000;
1750 : 4 : iov[49].iov_len = 16384;
1751 : 4 : iov[50].iov_base = (void *)0xFEED3400000;
1752 : 4 : iov[50].iov_len = 12288;
1753 : 4 : iov[51].iov_base = (void *)0xFEED3500000;
1754 : 4 : iov[51].iov_len = 4096;
1755 : 4 : iov[52].iov_base = (void *)0xFEED3600000;
1756 : 4 : iov[52].iov_len = 4096;
1757 : 4 : iov[53].iov_base = (void *)0xFEED3700000;
1758 : 4 : iov[53].iov_len = 4096;
1759 : 4 : iov[54].iov_base = (void *)0xFEED3800000;
1760 : 4 : iov[54].iov_len = 28672;
1761 : 4 : iov[55].iov_base = (void *)0xFEED3900000;
1762 : 4 : iov[55].iov_len = 20480;
1763 : 4 : iov[56].iov_base = (void *)0xFEED3A00000;
1764 : 4 : iov[56].iov_len = 4096;
1765 : 4 : iov[57].iov_base = (void *)0xFEED3B00000;
1766 : 4 : iov[57].iov_len = 12288;
1767 : 4 : iov[58].iov_base = (void *)0xFEED3C00000;
1768 : 4 : iov[58].iov_len = 4096;
1769 : 4 : iov[59].iov_base = (void *)0xFEED3D00000;
1770 : 4 : iov[59].iov_len = 4096;
1771 : 4 : iov[60].iov_base = (void *)0xFEED3E00000;
1772 : 4 : iov[60].iov_len = 352;
1773 : :
1774 : : /* The 1st child IO must be from iov[0] to iov[31] split by the capacity
1775 : : * of child iovs,
1776 : : */
1777 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
1778 : 4 : expected_io->md_buf = md_buf;
1779 [ + + ]: 132 : for (i = 0; i < 32; i++) {
1780 : 128 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1781 : 32 : }
1782 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1783 : :
1784 : : /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33]
1785 : : * split by the IO boundary requirement.
1786 : : */
1787 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1788 : 4 : expected_io->md_buf = md_buf + 126 * 8;
1789 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1790 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1791 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1792 : :
1793 : : /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1794 : : * the first 864 bytes of iov[46] split by the IO boundary requirement.
1795 : : */
1796 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1797 : 4 : expected_io->md_buf = md_buf + 128 * 8;
1798 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1799 : 4 : iov[33].iov_len - 864);
1800 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1801 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1802 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1803 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1804 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1805 : 4 : ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1806 : 4 : ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1807 : 4 : ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1808 : 4 : ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1809 : 4 : ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1810 : 4 : ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1811 : 4 : ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1812 : 4 : ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1813 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1814 : :
1815 : : /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1816 : : * first 864 bytes of iov[52] split by the IO boundary requirement.
1817 : : */
1818 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1819 : 4 : expected_io->md_buf = md_buf + 256 * 8;
1820 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1821 : 4 : iov[46].iov_len - 864);
1822 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1823 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1824 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1825 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1826 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1827 : 4 : ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1828 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1829 : :
1830 : : /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1831 : : * the first 4096 bytes of iov[57] split by the IO boundary requirement.
1832 : : */
1833 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1834 : 4 : expected_io->md_buf = md_buf + 384 * 8;
1835 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1836 : 4 : iov[52].iov_len - 864);
1837 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1838 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1839 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1840 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1841 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1842 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1843 : :
1844 : : /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1845 : : * to the first 3936 bytes of iov[58] split by the capacity of child iovs.
1846 : : */
1847 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1848 : 4 : expected_io->md_buf = md_buf + 512 * 8;
1849 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1850 : 4 : iov[57].iov_len - 4960);
1851 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1852 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1853 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1854 : :
1855 : : /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1856 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1857 : 4 : expected_io->md_buf = md_buf + 542 * 8;
1858 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1859 : 4 : iov[59].iov_len - 3936);
1860 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1861 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1862 : :
1863 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1864 : : 0, 543, io_done, NULL);
1865 : 4 : CU_ASSERT(rc == 0);
1866 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1867 : :
1868 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1869 : 4 : stub_complete_io(1);
1870 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1871 : :
1872 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1873 : 4 : stub_complete_io(5);
1874 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1875 : :
1876 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1877 : 4 : stub_complete_io(1);
1878 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1879 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1880 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1881 : :
1882 : : /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1883 : : * split, so test that.
1884 : : */
1885 : 4 : bdev->optimal_io_boundary = 15;
1886 : 4 : g_io_done = false;
1887 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1888 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1889 : :
1890 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1891 : 4 : CU_ASSERT(rc == 0);
1892 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1893 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1894 : 4 : stub_complete_io(1);
1895 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1896 : :
1897 : : /* Test an UNMAP. This should also not be split. */
1898 : 4 : bdev->optimal_io_boundary = 16;
1899 : 4 : g_io_done = false;
1900 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1901 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1902 : :
1903 : 4 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1904 : 4 : CU_ASSERT(rc == 0);
1905 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1906 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1907 : 4 : stub_complete_io(1);
1908 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1909 : :
1910 : : /* Test a FLUSH. This should also not be split. */
1911 : 4 : bdev->optimal_io_boundary = 16;
1912 : 4 : g_io_done = false;
1913 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1914 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1915 : :
1916 : 4 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1917 : 4 : CU_ASSERT(rc == 0);
1918 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1919 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1920 : 4 : stub_complete_io(1);
1921 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1922 : :
1923 : : /* Test a COPY. This should also not be split. */
1924 : 4 : bdev->optimal_io_boundary = 15;
1925 : 4 : g_io_done = false;
1926 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1927 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1928 : :
1929 : 4 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1930 : 4 : CU_ASSERT(rc == 0);
1931 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1932 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1933 : 4 : stub_complete_io(1);
1934 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1935 : :
1936 : 4 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1937 : :
1938 : : /* Children requests return an error status */
1939 : 4 : bdev->optimal_io_boundary = 16;
1940 : 4 : iov[0].iov_base = (void *)0x10000;
1941 : 4 : iov[0].iov_len = 512 * 64;
1942 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1943 : 4 : g_io_done = false;
1944 : 4 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1945 : :
1946 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1947 : 4 : CU_ASSERT(rc == 0);
1948 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1949 : 4 : stub_complete_io(4);
1950 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1951 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1952 : 4 : stub_complete_io(1);
1953 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1954 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1955 : :
1956 : : /* Test that a multi vector command is terminated with failure before continuing
1957 : : * the splitting process when one of the child I/Os fails.
1958 : : * The multi vector command is the same as the one above that needs to be split by strip
1959 : : * and then needs to be split further due to the capacity of child iovs.
1960 : : */
1961 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1962 : 124 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1963 : 124 : iov[i].iov_len = 512;
1964 : 31 : }
1965 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1966 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1967 : :
1968 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1969 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1970 : :
1971 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1972 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1973 : :
1974 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1975 : :
1976 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1977 : 4 : g_io_done = false;
1978 : 4 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1979 : :
1980 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1981 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1982 : 4 : CU_ASSERT(rc == 0);
1983 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1984 : :
1985 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1986 : 4 : stub_complete_io(1);
1987 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1988 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1989 : :
1990 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1991 : :
1992 : : /* for this test we will create the following conditions to hit the code path where
1993 : : * we are trying to send an IO following a split that has no iovs because we had to
1994 : : * trim them for alignment reasons.
1995 : : *
1996 : : * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1997 : : * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1998 : : * position 30 and overshoot by 0x2e.
1999 : : * - That means we'll send the IO and loop back to pick up the remaining bytes at
2000 : : * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
2001 : : * which eliminates that vector so we just send the first split IO with 30 vectors
2002 : : * and let the completion pick up the last 2 vectors.
2003 : : */
2004 : 4 : bdev->optimal_io_boundary = 32;
2005 : 4 : bdev->split_on_optimal_io_boundary = true;
2006 : 4 : g_io_done = false;
2007 : :
2008 : : /* Init all parent IOVs to 0x212 */
2009 [ + + ]: 140 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2010 : 136 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2011 : 136 : iov[i].iov_len = 0x212;
2012 : 34 : }
2013 : :
2014 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
2015 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
2016 : : /* expect 0-29 to be 1:1 with the parent iov */
2017 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2018 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2019 : 30 : }
2020 : :
2021 : : /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
2022 : : * where 0x2e is the amount we overshot the 16K boundary
2023 : : */
2024 : 5 : ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2025 : 1 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
2026 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2027 : :
2028 : : /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
2029 : : * shortened that take it to the next boundary and then a final one to get us to
2030 : : * 0x4200 bytes for the IO.
2031 : : */
2032 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
2033 : : 1, 2);
2034 : : /* position 30 picked up the remaining bytes to the next boundary */
2035 : 5 : ut_expected_io_set_iov(expected_io, 0,
2036 : 4 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
2037 : :
2038 : : /* position 31 picked up the rest of the transfer to get us to 0x4200 */
2039 : 5 : ut_expected_io_set_iov(expected_io, 1,
2040 : 1 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
2041 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2042 : :
2043 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
2044 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2045 : 4 : CU_ASSERT(rc == 0);
2046 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2047 : :
2048 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2049 : 4 : stub_complete_io(1);
2050 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2051 : :
2052 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2053 : 4 : stub_complete_io(1);
2054 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2055 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2056 : :
2057 : 4 : spdk_put_io_channel(io_ch);
2058 : 4 : spdk_bdev_close(desc);
2059 : 4 : free_bdev(bdev);
2060 : 4 : ut_fini_bdev();
2061 : 4 : }
2062 : :
2063 : : static void
2064 : 4 : bdev_io_max_size_and_segment_split_test(void)
2065 : : {
2066 : : struct spdk_bdev *bdev;
2067 : 4 : struct spdk_bdev_desc *desc = NULL;
2068 : : struct spdk_io_channel *io_ch;
2069 : 4 : struct spdk_bdev_opts bdev_opts = {};
2070 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2071 : : struct ut_expected_io *expected_io;
2072 : : uint64_t i;
2073 : : int rc;
2074 : :
2075 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2076 : 4 : bdev_opts.bdev_io_pool_size = 512;
2077 : 4 : bdev_opts.bdev_io_cache_size = 64;
2078 : 4 : bdev_opts.opts_size = sizeof(bdev_opts);
2079 : 4 : ut_init_bdev(&bdev_opts);
2080 : :
2081 : 4 : bdev = allocate_bdev("bdev0");
2082 : :
2083 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2084 : 4 : CU_ASSERT(rc == 0);
2085 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2086 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
2087 : 4 : CU_ASSERT(io_ch != NULL);
2088 : :
2089 : 4 : bdev->split_on_optimal_io_boundary = false;
2090 : 4 : bdev->optimal_io_boundary = 0;
2091 : :
2092 : : /* Case 0 max_num_segments == 0.
2093 : : * but segment size 2 * 512 > 512
2094 : : */
2095 : 4 : bdev->max_segment_size = 512;
2096 : 4 : bdev->max_num_segments = 0;
2097 : 4 : g_io_done = false;
2098 : :
2099 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2100 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2101 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2102 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2103 : :
2104 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2105 : 4 : CU_ASSERT(rc == 0);
2106 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2107 : :
2108 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2109 : 4 : stub_complete_io(1);
2110 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2111 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2112 : :
2113 : : /* Case 1 max_segment_size == 0
2114 : : * but iov num 2 > 1.
2115 : : */
2116 : 4 : bdev->max_segment_size = 0;
2117 : 4 : bdev->max_num_segments = 1;
2118 : 4 : g_io_done = false;
2119 : :
2120 : 4 : iov[0].iov_base = (void *)0x10000;
2121 : 4 : iov[0].iov_len = 512;
2122 : 4 : iov[1].iov_base = (void *)0x20000;
2123 : 4 : iov[1].iov_len = 8 * 512;
2124 : :
2125 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2126 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
2127 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2128 : :
2129 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
2130 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
2131 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2132 : :
2133 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
2134 : 4 : CU_ASSERT(rc == 0);
2135 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2136 : :
2137 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2138 : 4 : stub_complete_io(2);
2139 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2140 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2141 : :
2142 : : /* Test that a non-vector command is split correctly.
2143 : : * Set up the expected values before calling spdk_bdev_read_blocks
2144 : : */
2145 : 4 : bdev->max_segment_size = 512;
2146 : 4 : bdev->max_num_segments = 1;
2147 : 4 : g_io_done = false;
2148 : :
2149 : : /* Child IO 0 */
2150 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2151 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2152 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2153 : :
2154 : : /* Child IO 1 */
2155 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2156 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
2157 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2158 : :
2159 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
2160 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2161 : 4 : CU_ASSERT(rc == 0);
2162 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2163 : :
2164 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2165 : 4 : stub_complete_io(2);
2166 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2167 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2168 : :
2169 : : /* Now set up a more complex, multi-vector command that needs to be split,
2170 : : * including splitting iovecs.
2171 : : */
2172 : 4 : bdev->max_segment_size = 2 * 512;
2173 : 4 : bdev->max_num_segments = 1;
2174 : 4 : g_io_done = false;
2175 : :
2176 : 4 : iov[0].iov_base = (void *)0x10000;
2177 : 4 : iov[0].iov_len = 2 * 512;
2178 : 4 : iov[1].iov_base = (void *)0x20000;
2179 : 4 : iov[1].iov_len = 4 * 512;
2180 : 4 : iov[2].iov_base = (void *)0x30000;
2181 : 4 : iov[2].iov_len = 6 * 512;
2182 : :
2183 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2184 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2185 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2186 : :
2187 : : /* Split iov[1].size to 2 iov entries then split the segments */
2188 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2189 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2190 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2191 : :
2192 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2193 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2194 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2195 : :
2196 : : /* Split iov[2].size to 3 iov entries then split the segments */
2197 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2198 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2199 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2200 : :
2201 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2202 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2203 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2204 : :
2205 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2206 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2207 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2208 : :
2209 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2210 : 4 : CU_ASSERT(rc == 0);
2211 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2212 : :
2213 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2214 : 4 : stub_complete_io(6);
2215 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2216 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2217 : :
2218 : : /* Test multi vector command that needs to be split by strip and then needs to be
2219 : : * split further due to the capacity of parent IO child iovs.
2220 : : */
2221 : 4 : bdev->max_segment_size = 512;
2222 : 4 : bdev->max_num_segments = 1;
2223 : 4 : g_io_done = false;
2224 : :
2225 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2226 : 128 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2227 : 128 : iov[i].iov_len = 512 * 2;
2228 : 32 : }
2229 : :
2230 : : /* Each input iov.size is split into 2 iovs,
2231 : : * half of the input iov can fill all child iov entries of a single IO.
2232 : : */
2233 [ + + ]: 68 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2234 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2235 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2236 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2237 : :
2238 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2239 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2240 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2241 : 16 : }
2242 : :
2243 : : /* The remaining iov is split in the second round */
2244 [ + + ]: 68 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2245 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2246 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2247 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2248 : :
2249 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2250 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2251 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2252 : 16 : }
2253 : :
2254 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2255 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2256 : 4 : CU_ASSERT(rc == 0);
2257 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2258 : :
2259 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2260 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2261 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2262 : :
2263 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2264 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2265 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2266 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2267 : :
2268 : : /* An error case: a child IO produced by the split is
2269 : : * not a multiple of the block size,
2270 : : * so the split exits with an error
2271 : : */
2272 : 4 : bdev->max_segment_size = 512;
2273 : 4 : bdev->max_num_segments = 1;
2274 : 4 : g_io_done = false;
2275 : :
2276 : 4 : iov[0].iov_base = (void *)0x10000;
2277 : 4 : iov[0].iov_len = 512 + 256;
2278 : 4 : iov[1].iov_base = (void *)0x20000;
2279 : 4 : iov[1].iov_len = 256;
2280 : :
2281 : : /* iov[0] is split into 512 and 256.
2282 : : * 256 is less than one block size, and in the next round of
2283 : : * splitting it is found to be the first child IO smaller than
2284 : : * the block size, so the split exits with an error
2285 : : */
2286 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2287 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2288 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2289 : :
2290 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2291 : 4 : CU_ASSERT(rc == 0);
2292 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2293 : :
2294 : : /* First child IO is OK */
2295 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2296 : 4 : stub_complete_io(1);
2297 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2298 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2299 : :
2300 : : /* error exit */
2301 : 4 : stub_complete_io(1);
2302 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2303 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2304 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2305 : :
2306 : : /* Test multi vector command that needs to be split by strip and then needs to be
2307 : : * split further due to the capacity of child iovs.
2308 : : *
2309 : : * In this case, the last two iovs need to be split, but it will exceed the capacity
2310 : : * of child iovs, so it needs to wait until the first batch completed.
2311 : : */
2312 : 4 : bdev->max_segment_size = 512;
2313 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2314 : 4 : g_io_done = false;
2315 : :
2316 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2317 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2318 : 120 : iov[i].iov_len = 512;
2319 : 30 : }
2320 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2321 : 8 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2322 : 8 : iov[i].iov_len = 512 * 2;
2323 : 2 : }
2324 : :
2325 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2326 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2327 : : /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */
2328 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2329 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2330 : 30 : }
2331 : : /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2332 : 4 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2333 : 4 : ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2334 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2335 : :
2336 : : /* Child iov entries exceed the max num of parent IO so split it in next round */
2337 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2338 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2339 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2340 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2341 : :
2342 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2343 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2344 : 4 : CU_ASSERT(rc == 0);
2345 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2346 : :
2347 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2348 : 4 : stub_complete_io(1);
2349 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2350 : :
2351 : : /* Next round */
2352 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2353 : 4 : stub_complete_io(1);
2354 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2355 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2356 : :
2357 : : /* This case is similar to the previous one, but the io composed of
2358 : : * the last few entries of child iov is not enough for a blocklen, so they
2359 : : * cannot be put into this IO, but wait until the next time.
2360 : : */
2361 : 4 : bdev->max_segment_size = 512;
2362 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2363 : 4 : g_io_done = false;
2364 : :
2365 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2366 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2367 : 120 : iov[i].iov_len = 512;
2368 : 30 : }
2369 : :
2370 [ + + ]: 20 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2371 : 16 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2372 : 16 : iov[i].iov_len = 128;
2373 : 4 : }
2374 : :
2375 : : /* First child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2376 : : * because the remaining 2 iovs are not enough for a blocklen.
2377 : : */
2378 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2379 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2380 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2381 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2382 : 30 : }
2383 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2384 : :
2385 : : /* The second child io waits until the end of the first child io before executing.
2386 : : * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
2387 : : * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
2388 : : */
2389 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2390 : : 1, 4);
2391 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2392 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2393 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2394 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2395 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2396 : :
2397 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2398 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2399 : 4 : CU_ASSERT(rc == 0);
2400 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2401 : :
2402 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2403 : 4 : stub_complete_io(1);
2404 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2405 : :
2406 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2407 : 4 : stub_complete_io(1);
2408 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2409 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2410 : :
2411 : : /* A very complicated case. Each sg entry exceeds max_segment_size and
2412 : : * needs to be split. At the same time, child io must be a multiple of blocklen.
2413 : : * At the same time, child iovcnt exceeds parent iovcnt.
2414 : : */
2415 : 4 : bdev->max_segment_size = 512 + 128;
2416 : 4 : bdev->max_num_segments = 3;
2417 : 4 : g_io_done = false;
2418 : :
2419 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2420 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2421 : 120 : iov[i].iov_len = 512 + 256;
2422 : 30 : }
2423 : :
2424 [ + + ]: 20 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2425 : 16 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2426 : 16 : iov[i].iov_len = 512 + 128;
2427 : 4 : }
2428 : :
2429 : : /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries.
2430 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2431 : : * Generate 9 child IOs.
2432 : : */
2433 [ + + ]: 16 : for (i = 0; i < 3; i++) {
2434 : 12 : uint32_t j = i * 4;
2435 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2436 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2437 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2438 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2439 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2440 : :
2441 : : /* Child io must be a multiple of blocklen
2442 : : * iov[j + 2] must be split. If the third entry is also added,
2443 : : * the multiple of blocklen cannot be guaranteed. But it still
2444 : : * occupies one iov entry of the parent child iov.
2445 : : */
2446 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2447 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2448 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2449 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2450 : :
2451 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2452 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2453 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2454 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2455 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2456 : 3 : }
2457 : :
2458 : : /* Child iov position at 27, the 10th child IO
2459 : : * iov entry index is 3 * 4 and offset is 3 * 6
2460 : : */
2461 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2462 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2463 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2464 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2465 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2466 : :
2467 : : /* Child iov position at 30, the 11th child IO */
2468 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2469 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2470 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2471 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2472 : :
2473 : : /* The 2nd split round and iovpos is 0, the 12th child IO */
2474 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2475 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2476 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2477 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2478 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2479 : :
2480 : : /* Consume 9 child IOs and 27 child iov entries.
2481 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2482 : : * Parent IO iov index start from 16 and block offset start from 24
2483 : : */
2484 [ + + ]: 16 : for (i = 0; i < 3; i++) {
2485 : 12 : uint32_t j = i * 4 + 16;
2486 : 12 : uint32_t offset = i * 6 + 24;
2487 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2488 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2489 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2490 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2491 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2492 : :
2493 : : /* Child io must be a multiple of blocklen
2494 : : * iov[j + 2] must be split. If the third entry is also added,
2495 : : * the multiple of blocklen cannot be guaranteed. But it still
2496 : : * occupies one iov entry of the parent child iov.
2497 : : */
2498 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2499 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2500 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2501 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2502 : :
2503 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2504 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2505 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2506 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2507 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2508 : 3 : }
2509 : :
2510 : : /* The 22nd child IO, child iov position at 30 */
2511 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2512 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2513 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2514 : :
2515 : : /* The third round */
2516 : : /* Here is the 23rd child IO and child iovpos is 0 */
2517 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2518 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2519 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2520 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2521 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2522 : :
2523 : : /* The 24th child IO */
2524 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2525 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2526 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2527 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2528 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2529 : :
2530 : : /* The 25th child IO */
2531 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2532 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2533 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2534 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2535 : :
2536 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2537 : : 50, io_done, NULL);
2538 : 4 : CU_ASSERT(rc == 0);
2539 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2540 : :
2541 : : /* Parent IO supports up to 32 child iovs, so it is calculated that
2542 : : * a maximum of 11 IOs can be split at a time, and the
2543 : : * splitting will continue after the first batch is over.
2544 : : */
2545 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2546 : 4 : stub_complete_io(11);
2547 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2548 : :
2549 : : /* The 2nd round */
2550 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2551 : 4 : stub_complete_io(11);
2552 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2553 : :
2554 : : /* The last round */
2555 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2556 : 4 : stub_complete_io(3);
2557 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2558 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2559 : :
2560 : : /* Test a WRITE_ZEROES. This should also not be split. */
2561 : 4 : bdev->max_segment_size = 512;
2562 : 4 : bdev->max_num_segments = 1;
2563 : 4 : g_io_done = false;
2564 : :
2565 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2566 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2567 : :
2568 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2569 : 4 : CU_ASSERT(rc == 0);
2570 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2571 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2572 : 4 : stub_complete_io(1);
2573 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2574 : :
2575 : : /* Test an UNMAP. This should also not be split. */
2576 : 4 : g_io_done = false;
2577 : :
2578 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2579 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2580 : :
2581 : 4 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2582 : 4 : CU_ASSERT(rc == 0);
2583 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2584 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2585 : 4 : stub_complete_io(1);
2586 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2587 : :
2588 : : /* Test a FLUSH. This should also not be split. */
2589 : 4 : g_io_done = false;
2590 : :
2591 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2592 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2593 : :
2594 : 4 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
2595 : 4 : CU_ASSERT(rc == 0);
2596 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2597 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2598 : 4 : stub_complete_io(1);
2599 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2600 : :
2601 : : /* Test a COPY. This should also not be split. */
2602 : 4 : g_io_done = false;
2603 : :
2604 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2605 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2606 : :
2607 : 4 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2608 : 4 : CU_ASSERT(rc == 0);
2609 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2610 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2611 : 4 : stub_complete_io(1);
2612 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2613 : :
2614 : : /* Test that IOs are split on max_rw_size */
2615 : 4 : bdev->max_rw_size = 2;
2616 : 4 : bdev->max_segment_size = 0;
2617 : 4 : bdev->max_num_segments = 0;
2618 : 4 : g_io_done = false;
2619 : :
2620 : : /* 5 blocks in a contiguous buffer */
2621 : 4 : iov[0].iov_base = (void *)0x10000;
2622 : 4 : iov[0].iov_len = 5 * 512;
2623 : :
2624 : : /* First: offset=0, num_blocks=2 */
2625 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2626 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2627 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2628 : : /* Second: offset=2, num_blocks=2 */
2629 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1);
2630 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512);
2631 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2632 : : /* Third: offset=4, num_blocks=1 */
2633 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2634 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512);
2635 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2636 : :
2637 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL);
2638 : 4 : CU_ASSERT(rc == 0);
2639 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2640 : :
2641 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2642 : 4 : stub_complete_io(3);
2643 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2644 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2645 : :
2646 : : /* Check splitting on both max_rw_size + max_num_segments */
2647 : 4 : bdev->max_rw_size = 2;
2648 : 4 : bdev->max_num_segments = 2;
2649 : 4 : bdev->max_segment_size = 0;
2650 : 4 : g_io_done = false;
2651 : :
2652 : : /* 5 blocks split across 4 iovs */
2653 : 4 : iov[0].iov_base = (void *)0x10000;
2654 : 4 : iov[0].iov_len = 3 * 512;
2655 : 4 : iov[1].iov_base = (void *)0x20000;
2656 : 4 : iov[1].iov_len = 256;
2657 : 4 : iov[2].iov_base = (void *)0x30000;
2658 : 4 : iov[2].iov_len = 256;
2659 : 4 : iov[3].iov_base = (void *)0x40000;
2660 : 4 : iov[3].iov_len = 512;
2661 : :
2662 : : /* First: offset=0, num_blocks=2, iovcnt=1 */
2663 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2664 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2665 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2666 : : /* Second: offset=2, num_blocks=1, iovcnt=1 (max_num_segments prevents from submitting
2667 : : * the rest of iov[0], and iov[1]+iov[2] together — that would take 3 segments)
2668 : : */
2669 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
2670 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
2671 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2672 : : /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
2673 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
2674 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
2675 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
2676 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2677 : : /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
2678 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2679 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
2680 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2681 : :
2682 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
2683 : 4 : CU_ASSERT(rc == 0);
2684 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2685 : :
2686 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2687 : 4 : stub_complete_io(4);
2688 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2689 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2690 : :
2691 : : /* Check splitting on both max_rw_size + max_segment_size */
2692 : 4 : bdev->max_rw_size = 2;
2693 : 4 : bdev->max_segment_size = 512;
2694 : 4 : bdev->max_num_segments = 0;
2695 : 4 : g_io_done = false;
2696 : :
2697 : : /* 6 blocks in a contiguous buffer */
2698 : 4 : iov[0].iov_base = (void *)0x10000;
2699 : 4 : iov[0].iov_len = 6 * 512;
2700 : :
2701 : : /* We expect 3 IOs each with 2 blocks and 2 iovs */
2702 [ + + ]: 16 : for (i = 0; i < 3; ++i) {
2703 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
2704 : 12 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
2705 : 12 : ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
2706 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2707 : 3 : }
2708 : :
2709 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
2710 : 4 : CU_ASSERT(rc == 0);
2711 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2712 : :
2713 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2714 : 4 : stub_complete_io(3);
2715 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2716 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2717 : :
2718 : : /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
2719 : 4 : bdev->max_rw_size = 1;
2720 : 4 : bdev->max_segment_size = 0;
2721 : 4 : bdev->max_num_segments = 0;
2722 : 4 : g_io_done = false;
2723 : :
2724 : : /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
2725 : 4 : iov[0].iov_base = (void *)0x10000;
2726 : 4 : iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;
2727 : :
2728 : : /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
2729 [ + + ]: 16 : for (i = 0; i < 3; ++i) {
2730 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
2731 : 12 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
2732 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2733 : 3 : }
2734 : :
2735 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2736 : 4 : CU_ASSERT(rc == 0);
2737 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2738 : :
2739 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2740 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2741 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2742 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2743 : 4 : stub_complete_io(1);
2744 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2745 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2746 : :
2747 : 4 : spdk_put_io_channel(io_ch);
2748 : 4 : spdk_bdev_close(desc);
2749 : 4 : free_bdev(bdev);
2750 : 4 : ut_fini_bdev();
2751 : 4 : }
2752 : :
2753 : : static void
2754 : 4 : bdev_io_mix_split_test(void)
2755 : : {
2756 : : struct spdk_bdev *bdev;
2757 : 4 : struct spdk_bdev_desc *desc = NULL;
2758 : : struct spdk_io_channel *io_ch;
2759 : 4 : struct spdk_bdev_opts bdev_opts = {};
2760 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2761 : : struct ut_expected_io *expected_io;
2762 : : uint64_t i;
2763 : : int rc;
2764 : :
2765 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2766 : 4 : bdev_opts.bdev_io_pool_size = 512;
2767 : 4 : bdev_opts.bdev_io_cache_size = 64;
2768 : 4 : ut_init_bdev(&bdev_opts);
2769 : :
2770 : 4 : bdev = allocate_bdev("bdev0");
2771 : :
2772 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2773 : 4 : CU_ASSERT(rc == 0);
2774 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2775 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
2776 : 4 : CU_ASSERT(io_ch != NULL);
2777 : :
2778 : : /* First case optimal_io_boundary == max_segment_size * max_num_segments */
2779 : 4 : bdev->split_on_optimal_io_boundary = true;
2780 : 4 : bdev->optimal_io_boundary = 16;
2781 : :
2782 : 4 : bdev->max_segment_size = 512;
2783 : 4 : bdev->max_num_segments = 16;
2784 : 4 : g_io_done = false;
2785 : :
2786 : : /* IO crossing the IO boundary requires split
2787 : : * Total 2 child IOs.
2788 : : */
2789 : :
2790 : : /* The 1st child IO split the segment_size to multiple segment entry */
2791 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2792 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2793 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2794 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2795 : :
2796 : : /* The 2nd child IO split the segment_size to multiple segment entry */
2797 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2798 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2799 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2800 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2801 : :
2802 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2803 : 4 : CU_ASSERT(rc == 0);
2804 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2805 : :
2806 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2807 : 4 : stub_complete_io(2);
2808 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2809 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2810 : :
2811 : : /* Second case optimal_io_boundary > max_segment_size * max_num_segments */
2812 : 4 : bdev->max_segment_size = 15 * 512;
2813 : 4 : bdev->max_num_segments = 1;
2814 : 4 : g_io_done = false;
2815 : :
2816 : : /* IO crossing the IO boundary requires split.
2817 : : * The 1st child IO segment size exceeds the max_segment_size,
2818 : : * So 1st child IO will be split to multiple segment entry.
2819 : : * Then it split to 2 child IOs because of the max_num_segments.
2820 : : * Total 3 child IOs.
2821 : : */
2822 : :
2823 : : /* The first 2 IOs are in an IO boundary.
2824 : : * Because the optimal_io_boundary > max_segment_size * max_num_segments
2825 : : * So it split to the first 2 IOs.
2826 : : */
2827 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2828 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2829 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2830 : :
2831 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2832 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2833 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2834 : :
2835 : : /* The 3rd Child IO is because of the io boundary */
2836 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2837 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2838 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2839 : :
2840 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2841 : 4 : CU_ASSERT(rc == 0);
2842 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2843 : :
2844 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2845 : 4 : stub_complete_io(3);
2846 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2847 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2848 : :
2849 : : /* Third case optimal_io_boundary < max_segment_size * max_num_segments */
2850 : 4 : bdev->max_segment_size = 17 * 512;
2851 : 4 : bdev->max_num_segments = 1;
2852 : 4 : g_io_done = false;
2853 : :
2854 : : /* IO crossing the IO boundary requires split.
2855 : : * Child IO does not split.
2856 : : * Total 2 child IOs.
2857 : : */
2858 : :
2859 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2860 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2861 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2862 : :
2863 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2864 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2865 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2866 : :
2867 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2868 : 4 : CU_ASSERT(rc == 0);
2869 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2870 : :
2871 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2872 : 4 : stub_complete_io(2);
2873 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2874 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2875 : :
2876 : : /* Now set up a more complex, multi-vector command that needs to be split,
2877 : : * including splitting iovecs.
2878 : : * optimal_io_boundary < max_segment_size * max_num_segments
2879 : : */
2880 : 4 : bdev->max_segment_size = 3 * 512;
2881 : 4 : bdev->max_num_segments = 6;
2882 : 4 : g_io_done = false;
2883 : :
2884 : 4 : iov[0].iov_base = (void *)0x10000;
2885 : 4 : iov[0].iov_len = 4 * 512;
2886 : 4 : iov[1].iov_base = (void *)0x20000;
2887 : 4 : iov[1].iov_len = 4 * 512;
2888 : 4 : iov[2].iov_base = (void *)0x30000;
2889 : 4 : iov[2].iov_len = 10 * 512;
2890 : :
2891 : : /* IO crossing the IO boundary requires split.
2892 : : * The 1st child IO segment size exceeds the max_segment_size and after
2893 : : * splitting segment_size, the num_segments exceeds max_num_segments.
2894 : : * So 1st child IO will be split to 2 child IOs.
2895 : : * Total 3 child IOs.
2896 : : */
2897 : :
2898 : : /* The first 2 IOs are in an IO boundary.
2899 : : * After splitting segment size the segment num exceeds.
2900 : : * So it splits to 2 child IOs.
2901 : : */
2902 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2903 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2904 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2905 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2906 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2907 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2908 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2909 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2910 : :
2911 : : /* The 2nd child IO has the left segment entry */
2912 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2913 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2914 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2915 : :
2916 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2917 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2918 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2919 : :
2920 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2921 : 4 : CU_ASSERT(rc == 0);
2922 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2923 : :
2924 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2925 : 4 : stub_complete_io(3);
2926 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2927 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2928 : :
2929 : : /* A very complicated case. Each sg entry exceeds max_segment_size
2930 : : * and split on io boundary.
2931 : : * optimal_io_boundary < max_segment_size * max_num_segments
2932 : : */
2933 : 4 : bdev->max_segment_size = 3 * 512;
2934 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2935 : 4 : g_io_done = false;
2936 : :
2937 [ + + ]: 84 : for (i = 0; i < 20; i++) {
2938 : 80 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2939 : 80 : iov[i].iov_len = 512 * 4;
2940 : 20 : }
2941 : :
2942 : : /* IO crossing the IO boundary requires split.
2943 : : * 80 block length can split 5 child IOs base on offset and IO boundary.
2944 : : * Each iov entry needs to be split to 2 entries because of max_segment_size
2945 : : * Total 5 child IOs.
2946 : : */
2947 : :
2948 : : /* 4 iov entries are in an IO boundary and each iov entry splits to 2.
2949 : : * So each child IO occupies 8 child iov entries.
2950 : : */
2951 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2952 [ + + ]: 20 : for (i = 0; i < 4; i++) {
2953 : 16 : int iovcnt = i * 2;
2954 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2955 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2956 : 4 : }
2957 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2958 : :
2959 : : /* 2nd child IO and total 16 child iov entries of parent IO */
2960 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2961 [ + + ]: 20 : for (i = 4; i < 8; i++) {
2962 : 16 : int iovcnt = (i - 4) * 2;
2963 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2964 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2965 : 4 : }
2966 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2967 : :
2968 : : /* 3rd child IO and total 24 child iov entries of parent IO */
2969 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2970 [ + + ]: 20 : for (i = 8; i < 12; i++) {
2971 : 16 : int iovcnt = (i - 8) * 2;
2972 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2973 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2974 : 4 : }
2975 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2976 : :
2977 : : /* 4th child IO and total 32 child iov entries of parent IO */
2978 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2979 [ + + ]: 20 : for (i = 12; i < 16; i++) {
2980 : 16 : int iovcnt = (i - 12) * 2;
2981 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2982 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2983 : 4 : }
2984 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2985 : :
2986 : : /* 5th child IO and because of the child iov entry it should be split
2987 : : * in next round.
2988 : : */
2989 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
2990 [ + + ]: 20 : for (i = 16; i < 20; i++) {
2991 : 16 : int iovcnt = (i - 16) * 2;
2992 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2993 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2994 : 4 : }
2995 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2996 : :
2997 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
2998 : 4 : CU_ASSERT(rc == 0);
2999 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3000 : :
3001 : : /* First split round */
3002 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
3003 : 4 : stub_complete_io(4);
3004 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3005 : :
3006 : : /* Second split round */
3007 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3008 : 4 : stub_complete_io(1);
3009 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3010 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3011 : :
3012 : 4 : spdk_put_io_channel(io_ch);
3013 : 4 : spdk_bdev_close(desc);
3014 : 4 : free_bdev(bdev);
3015 : 4 : ut_fini_bdev();
3016 : 4 : }
3017 : :
3018 : : static void
3019 : 4 : bdev_io_split_with_io_wait(void)
3020 : : {
3021 : : struct spdk_bdev *bdev;
3022 : 4 : struct spdk_bdev_desc *desc = NULL;
3023 : : struct spdk_io_channel *io_ch;
3024 : : struct spdk_bdev_channel *channel;
3025 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
3026 : 4 : struct spdk_bdev_opts bdev_opts = {};
3027 : 3 : struct iovec iov[3];
3028 : : struct ut_expected_io *expected_io;
3029 : : int rc;
3030 : :
3031 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3032 : 4 : bdev_opts.bdev_io_pool_size = 2;
3033 : 4 : bdev_opts.bdev_io_cache_size = 1;
3034 : 4 : ut_init_bdev(&bdev_opts);
3035 : :
3036 : 4 : bdev = allocate_bdev("bdev0");
3037 : :
3038 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3039 : 4 : CU_ASSERT(rc == 0);
3040 : 4 : CU_ASSERT(desc != NULL);
3041 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3042 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3043 : 4 : CU_ASSERT(io_ch != NULL);
3044 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
3045 : 4 : mgmt_ch = channel->shared_resource->mgmt_ch;
3046 : :
3047 : 4 : bdev->optimal_io_boundary = 16;
3048 : 4 : bdev->split_on_optimal_io_boundary = true;
3049 : :
3050 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
3051 : 4 : CU_ASSERT(rc == 0);
3052 : :
3053 : : /* Now test that a single-vector command is split correctly.
3054 : : * Offset 14, length 8, payload 0xF000
3055 : : * Child - Offset 14, length 2, payload 0xF000
3056 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
3057 : : *
3058 : : * Set up the expected values before calling spdk_bdev_read_blocks
3059 : : */
3060 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
3061 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
3062 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3063 : :
3064 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
3065 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
3066 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3067 : :
3068 : : /* The following children will be submitted sequentially due to the capacity of
3069 : : * spdk_bdev_io.
3070 : : */
3071 : :
3072 : : /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
3073 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
3074 : 4 : CU_ASSERT(rc == 0);
3075 : 4 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3076 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3077 : :
3078 : : /* Completing the first read I/O will submit the first child */
3079 : 4 : stub_complete_io(1);
3080 : 4 : CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3081 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3082 : :
3083 : : /* Completing the first child will submit the second child */
3084 : 4 : stub_complete_io(1);
3085 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3086 : :
3087 : : /* Complete the second child I/O. This should result in our callback getting
3088 : : * invoked since the parent I/O is now complete.
3089 : : */
3090 : 4 : stub_complete_io(1);
3091 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3092 : :
3093 : : /* Now set up a more complex, multi-vector command that needs to be split,
3094 : : * including splitting iovecs.
3095 : : */
3096 : 4 : iov[0].iov_base = (void *)0x10000;
3097 : 4 : iov[0].iov_len = 512;
3098 : 4 : iov[1].iov_base = (void *)0x20000;
3099 : 4 : iov[1].iov_len = 20 * 512;
3100 : 4 : iov[2].iov_base = (void *)0x30000;
3101 : 4 : iov[2].iov_len = 11 * 512;
3102 : :
3103 : 4 : g_io_done = false;
3104 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
3105 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
3106 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
3107 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3108 : :
3109 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
3110 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
3111 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3112 : :
3113 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
3114 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
3115 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
3116 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3117 : :
3118 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
3119 : 4 : CU_ASSERT(rc == 0);
3120 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3121 : :
3122 : : /* The following children will be submitted sequentially due to the capacity of
3123 : : * spdk_bdev_io.
3124 : : */
3125 : :
3126 : : /* Completing the first child will submit the second child */
3127 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3128 : 4 : stub_complete_io(1);
3129 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3130 : :
3131 : : /* Completing the second child will submit the third child */
3132 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3133 : 4 : stub_complete_io(1);
3134 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3135 : :
3136 : : /* Completing the third child will result in our callback getting invoked
3137 : : * since the parent I/O is now complete.
3138 : : */
3139 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3140 : 4 : stub_complete_io(1);
3141 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3142 : :
3143 : 4 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
3144 : :
3145 : 4 : spdk_put_io_channel(io_ch);
3146 : 4 : spdk_bdev_close(desc);
3147 : 4 : free_bdev(bdev);
3148 : 4 : ut_fini_bdev();
3149 : 4 : }
3150 : :
3151 : : static void
3152 : 4 : bdev_io_write_unit_split_test(void)
3153 : : {
3154 : : struct spdk_bdev *bdev;
3155 : 4 : struct spdk_bdev_desc *desc = NULL;
3156 : : struct spdk_io_channel *io_ch;
3157 : 4 : struct spdk_bdev_opts bdev_opts = {};
3158 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4];
3159 : : struct ut_expected_io *expected_io;
3160 : : uint64_t i;
3161 : : int rc;
3162 : :
3163 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3164 : 4 : bdev_opts.bdev_io_pool_size = 512;
3165 : 4 : bdev_opts.bdev_io_cache_size = 64;
3166 : 4 : ut_init_bdev(&bdev_opts);
3167 : :
3168 : 4 : bdev = allocate_bdev("bdev0");
3169 : :
3170 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
3171 : 4 : CU_ASSERT(rc == 0);
3172 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
3173 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3174 : 4 : CU_ASSERT(io_ch != NULL);
3175 : :
3176 : : /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */
3177 : 4 : bdev->write_unit_size = 32;
3178 : 4 : bdev->split_on_write_unit = true;
3179 : 4 : g_io_done = false;
3180 : :
3181 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1);
3182 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512);
3183 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3184 : :
3185 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1);
3186 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512);
3187 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3188 : :
3189 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3190 : 4 : CU_ASSERT(rc == 0);
3191 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3192 : :
3193 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3194 : 4 : stub_complete_io(2);
3195 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3196 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3197 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3198 : :
3199 : : /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split
3200 : : * based on write_unit_size, not optimal_io_boundary */
3201 : 4 : bdev->split_on_optimal_io_boundary = true;
3202 : 4 : bdev->optimal_io_boundary = 16;
3203 : 4 : g_io_done = false;
3204 : :
3205 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3206 : 4 : CU_ASSERT(rc == 0);
3207 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3208 : :
3209 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3210 : 4 : stub_complete_io(2);
3211 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3212 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3213 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3214 : :
3215 : : /* Write I/O should fail if it is smaller than write_unit_size */
3216 : 4 : g_io_done = false;
3217 : :
3218 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL);
3219 : 4 : CU_ASSERT(rc == 0);
3220 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3221 : :
3222 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3223 : 4 : poll_threads();
3224 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3225 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3226 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3227 : :
3228 : : /* Same for I/O not aligned to write_unit_size */
3229 : 4 : g_io_done = false;
3230 : :
3231 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL);
3232 : 4 : CU_ASSERT(rc == 0);
3233 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3234 : :
3235 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3236 : 4 : poll_threads();
3237 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3238 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3239 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3240 : :
3241 : : /* Write should fail if it needs to be split but there are not enough iovs to submit
3242 : : * an entire write unit */
3243 : 4 : bdev->write_unit_size = SPDK_COUNTOF(iov) / 2;
3244 : 4 : g_io_done = false;
3245 : :
3246 [ + + ]: 516 : for (i = 0; i < SPDK_COUNTOF(iov); i++) {
3247 : 512 : iov[i].iov_base = (void *)(0x1000 + 512 * i);
3248 : 512 : iov[i].iov_len = 512;
3249 : 128 : }
3250 : :
3251 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov),
3252 : : io_done, NULL);
3253 : 4 : CU_ASSERT(rc == 0);
3254 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3255 : :
3256 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3257 : 4 : poll_threads();
3258 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3259 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3260 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3261 : :
3262 : 4 : spdk_put_io_channel(io_ch);
3263 : 4 : spdk_bdev_close(desc);
3264 : 4 : free_bdev(bdev);
3265 : 4 : ut_fini_bdev();
3266 : 4 : }
3267 : :
3268 : : static void
3269 : 4 : bdev_io_alignment(void)
3270 : : {
3271 : : struct spdk_bdev *bdev;
3272 : 4 : struct spdk_bdev_desc *desc = NULL;
3273 : : struct spdk_io_channel *io_ch;
3274 : 4 : struct spdk_bdev_opts bdev_opts = {};
3275 : : int rc;
3276 : 4 : void *buf = NULL;
3277 : 3 : struct iovec iovs[2];
3278 : : int iovcnt;
3279 : : uint64_t alignment;
3280 : :
3281 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3282 : 4 : bdev_opts.bdev_io_pool_size = 20;
3283 : 4 : bdev_opts.bdev_io_cache_size = 2;
3284 : 4 : ut_init_bdev(&bdev_opts);
3285 : :
3286 : 4 : fn_table.submit_request = stub_submit_request_get_buf;
3287 : 4 : bdev = allocate_bdev("bdev0");
3288 : :
3289 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3290 : 4 : CU_ASSERT(rc == 0);
3291 : 4 : CU_ASSERT(desc != NULL);
3292 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3293 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3294 : 4 : CU_ASSERT(io_ch != NULL);
3295 : :
3296 : : /* Create aligned buffer */
3297 [ - + ]: 4 : rc = posix_memalign(&buf, 4096, 8192);
3298 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(rc == 0);
3299 : :
3300 : : /* Pass aligned single buffer with no alignment required */
3301 : 4 : alignment = 1;
3302 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3303 : :
3304 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3305 : 4 : CU_ASSERT(rc == 0);
3306 : 4 : stub_complete_io(1);
3307 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3308 : : alignment));
3309 : :
3310 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3311 : 4 : CU_ASSERT(rc == 0);
3312 : 4 : stub_complete_io(1);
3313 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3314 : : alignment));
3315 : :
3316 : : /* Pass unaligned single buffer with no alignment required */
3317 : 4 : alignment = 1;
3318 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3319 : :
3320 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3321 : 4 : CU_ASSERT(rc == 0);
3322 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3323 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3324 : 4 : stub_complete_io(1);
3325 : :
3326 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3327 : 4 : CU_ASSERT(rc == 0);
3328 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3329 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3330 : 4 : stub_complete_io(1);
3331 : :
3332 : : /* Pass unaligned single buffer with 512 alignment required */
3333 : 4 : alignment = 512;
3334 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3335 : :
3336 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3337 : 4 : CU_ASSERT(rc == 0);
3338 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3339 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3340 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3341 : : alignment));
3342 : 4 : stub_complete_io(1);
3343 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3344 : :
3345 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3346 : 4 : CU_ASSERT(rc == 0);
3347 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3348 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3349 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3350 : : alignment));
3351 : 4 : stub_complete_io(1);
3352 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3353 : :
3354 : : /* Pass unaligned single buffer with 4096 alignment required */
3355 : 4 : alignment = 4096;
3356 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3357 : :
3358 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3359 : 4 : CU_ASSERT(rc == 0);
3360 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3361 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3362 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3363 : : alignment));
3364 : 4 : stub_complete_io(1);
3365 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3366 : :
3367 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3368 : 4 : CU_ASSERT(rc == 0);
3369 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3370 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3371 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3372 : : alignment));
3373 : 4 : stub_complete_io(1);
3374 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3375 : :
3376 : : /* Pass aligned iovs with no alignment required */
3377 : 4 : alignment = 1;
3378 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3379 : :
3380 : 4 : iovcnt = 1;
3381 : 4 : iovs[0].iov_base = buf;
3382 : 4 : iovs[0].iov_len = 512;
3383 : :
3384 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3385 : 4 : CU_ASSERT(rc == 0);
3386 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3387 : 4 : stub_complete_io(1);
3388 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3389 : :
3390 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3391 : 4 : CU_ASSERT(rc == 0);
3392 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3393 : 4 : stub_complete_io(1);
3394 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3395 : :
3396 : : /* Pass unaligned iovs with no alignment required */
3397 : 4 : alignment = 1;
3398 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3399 : :
3400 : 4 : iovcnt = 2;
3401 : 4 : iovs[0].iov_base = buf + 16;
3402 : 4 : iovs[0].iov_len = 256;
3403 : 4 : iovs[1].iov_base = buf + 16 + 256 + 32;
3404 : 4 : iovs[1].iov_len = 256;
3405 : :
3406 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3407 : 4 : CU_ASSERT(rc == 0);
3408 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3409 : 4 : stub_complete_io(1);
3410 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3411 : :
3412 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3413 : 4 : CU_ASSERT(rc == 0);
3414 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3415 : 4 : stub_complete_io(1);
3416 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3417 : :
3418 : : /* Pass unaligned iov with 2048 alignment required */
3419 : 4 : alignment = 2048;
3420 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3421 : :
3422 : 4 : iovcnt = 2;
3423 : 4 : iovs[0].iov_base = buf + 16;
3424 : 4 : iovs[0].iov_len = 256;
3425 : 4 : iovs[1].iov_base = buf + 16 + 256 + 32;
3426 : 4 : iovs[1].iov_len = 256;
3427 : :
3428 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3429 : 4 : CU_ASSERT(rc == 0);
3430 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt);
3431 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3432 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3433 : : alignment));
3434 : 4 : stub_complete_io(1);
3435 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3436 : :
3437 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3438 : 4 : CU_ASSERT(rc == 0);
3439 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt);
3440 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3441 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3442 : : alignment));
3443 : 4 : stub_complete_io(1);
3444 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3445 : :
3446 : : /* Pass iov without allocated buffer without alignment required */
3447 : 4 : alignment = 1;
3448 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3449 : :
3450 : 4 : iovcnt = 1;
3451 : 4 : iovs[0].iov_base = NULL;
3452 : 4 : iovs[0].iov_len = 0;
3453 : :
3454 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3455 : 4 : CU_ASSERT(rc == 0);
3456 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3457 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3458 : : alignment));
3459 : 4 : stub_complete_io(1);
3460 : :
3461 : : /* Pass iov without allocated buffer with 1024 alignment required */
3462 : 4 : alignment = 1024;
3463 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3464 : :
3465 : 4 : iovcnt = 1;
3466 : 4 : iovs[0].iov_base = NULL;
3467 : 4 : iovs[0].iov_len = 0;
3468 : :
3469 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3470 : 4 : CU_ASSERT(rc == 0);
3471 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3472 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3473 : : alignment));
3474 : 4 : stub_complete_io(1);
3475 : :
3476 : 4 : spdk_put_io_channel(io_ch);
3477 : 4 : spdk_bdev_close(desc);
3478 : 4 : free_bdev(bdev);
3479 : 4 : fn_table.submit_request = stub_submit_request;
3480 : 4 : ut_fini_bdev();
3481 : :
3482 : 4 : free(buf);
3483 : 4 : }
3484 : :
3485 : : static void
3486 : 4 : bdev_io_alignment_with_boundary(void)
3487 : : {
3488 : : struct spdk_bdev *bdev;
3489 : 4 : struct spdk_bdev_desc *desc = NULL;
3490 : : struct spdk_io_channel *io_ch;
3491 : 4 : struct spdk_bdev_opts bdev_opts = {};
3492 : : int rc;
3493 : 4 : void *buf = NULL;
3494 : 3 : struct iovec iovs[2];
3495 : : int iovcnt;
3496 : : uint64_t alignment;
3497 : :
3498 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3499 : 4 : bdev_opts.bdev_io_pool_size = 20;
3500 : 4 : bdev_opts.bdev_io_cache_size = 2;
3501 : 4 : bdev_opts.opts_size = sizeof(bdev_opts);
3502 : 4 : ut_init_bdev(&bdev_opts);
3503 : :
3504 : 4 : fn_table.submit_request = stub_submit_request_get_buf;
3505 : 4 : bdev = allocate_bdev("bdev0");
3506 : :
3507 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3508 : 4 : CU_ASSERT(rc == 0);
3509 : 4 : CU_ASSERT(desc != NULL);
3510 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3511 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3512 : 4 : CU_ASSERT(io_ch != NULL);
3513 : :
3514 : : /* Create aligned buffer */
3515 [ - + ]: 4 : rc = posix_memalign(&buf, 4096, 131072);
3516 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(rc == 0);
3517 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3518 : :
3519 : : #ifdef NOTDEF
3520 : : /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
3521 : : alignment = 512;
3522 : : bdev->required_alignment = spdk_u32log2(alignment);
3523 : : bdev->optimal_io_boundary = 2;
3524 : : bdev->split_on_optimal_io_boundary = true;
3525 : :
3526 : : iovcnt = 1;
3527 : : iovs[0].iov_base = NULL;
3528 : : iovs[0].iov_len = 512 * 3;
3529 : :
3530 : : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3531 : : CU_ASSERT(rc == 0);
3532 : : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3533 : : stub_complete_io(2);
3534 : :
3535 : : /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
3536 : : alignment = 512;
3537 : : bdev->required_alignment = spdk_u32log2(alignment);
3538 : : bdev->optimal_io_boundary = 16;
3539 : : bdev->split_on_optimal_io_boundary = true;
3540 : :
3541 : : iovcnt = 1;
3542 : : iovs[0].iov_base = NULL;
3543 : : iovs[0].iov_len = 512 * 16;
3544 : :
3545 : : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
3546 : : CU_ASSERT(rc == 0);
3547 : : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3548 : : stub_complete_io(2);
3549 : :
3550 : : /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
3551 : : alignment = 512;
3552 : : bdev->required_alignment = spdk_u32log2(alignment);
3553 : : bdev->optimal_io_boundary = 128;
3554 : : bdev->split_on_optimal_io_boundary = true;
3555 : :
3556 : : iovcnt = 1;
3557 : : iovs[0].iov_base = buf + 16;
3558 : : iovs[0].iov_len = 512 * 160;
3559 : : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3560 : : CU_ASSERT(rc == 0);
3561 : : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3562 : : stub_complete_io(2);
3563 : :
3564 : : #endif
3565 : :
3566 : : /* 512 * 3 with 2 IO boundary */
3567 : 4 : alignment = 512;
3568 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3569 : 4 : bdev->optimal_io_boundary = 2;
3570 : 4 : bdev->split_on_optimal_io_boundary = true;
3571 : :
3572 : 4 : iovcnt = 2;
3573 : 4 : iovs[0].iov_base = buf + 16;
3574 : 4 : iovs[0].iov_len = 512;
3575 : 4 : iovs[1].iov_base = buf + 16 + 512 + 32;
3576 : 4 : iovs[1].iov_len = 1024;
3577 : :
3578 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3579 : 4 : CU_ASSERT(rc == 0);
3580 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3581 : 4 : stub_complete_io(2);
3582 : :
3583 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3584 : 4 : CU_ASSERT(rc == 0);
3585 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3586 : 4 : stub_complete_io(2);
3587 : :
3588 : : /* 512 * 64 with 32 IO boundary */
3589 : 4 : bdev->optimal_io_boundary = 32;
3590 : 4 : iovcnt = 2;
3591 : 4 : iovs[0].iov_base = buf + 16;
3592 : 4 : iovs[0].iov_len = 16384;
3593 : 4 : iovs[1].iov_base = buf + 16 + 16384 + 32;
3594 : 4 : iovs[1].iov_len = 16384;
3595 : :
3596 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3597 : 4 : CU_ASSERT(rc == 0);
3598 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3599 : 4 : stub_complete_io(3);
3600 : :
3601 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3602 : 4 : CU_ASSERT(rc == 0);
3603 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3604 : 4 : stub_complete_io(3);
3605 : :
3606 : : /* 512 * 160 with 32 IO boundary */
3607 : 4 : iovcnt = 1;
3608 : 4 : iovs[0].iov_base = buf + 16;
3609 : 4 : iovs[0].iov_len = 16384 + 65536;
3610 : :
3611 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3612 : 4 : CU_ASSERT(rc == 0);
3613 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
3614 : 4 : stub_complete_io(6);
3615 : :
3616 : 4 : spdk_put_io_channel(io_ch);
3617 : 4 : spdk_bdev_close(desc);
3618 : 4 : free_bdev(bdev);
3619 : 4 : fn_table.submit_request = stub_submit_request;
3620 : 4 : ut_fini_bdev();
3621 : :
3622 : 4 : free(buf);
3623 : 4 : }
3624 : :
3625 : : static void
3626 : 8 : histogram_status_cb(void *cb_arg, int status)
3627 : : {
3628 : 8 : g_status = status;
3629 : 8 : }
3630 : :
3631 : : static void
3632 : 12 : histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3633 : : {
3634 : 12 : g_status = status;
3635 : 12 : g_histogram = histogram;
3636 : 12 : }
3637 : :
3638 : : static void
3639 : 89088 : histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
3640 : : uint64_t total, uint64_t so_far)
3641 : : {
3642 : 89088 : g_count += count;
3643 : 89088 : }
3644 : :
3645 : : static void
3646 : 8 : histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3647 : : {
3648 : 8 : spdk_histogram_data_fn cb_fn = cb_arg;
3649 : :
3650 : 8 : g_status = status;
3651 : :
3652 [ + + ]: 8 : if (status == 0) {
3653 : 4 : spdk_histogram_data_iterate(histogram, cb_fn, NULL);
3654 : 1 : }
3655 : 8 : }
3656 : :
/* Verify the bdev histogram feature end to end: enable, confirm a fresh
 * histogram is empty, run one write and one read, confirm both I/Os were
 * recorded (bdev-wide and per-channel), then disable and confirm further
 * histogram queries fail with -EFAULT.
 */
static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram; completion is asynchronous, so poll before checking. */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

	rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Advance mock time before completing so the I/O has a nonzero latency
	 * to record — presumably; confirm against spdk_delay_us() semantics. */
	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	/* Both the write and the read above must appear in the totals. */
	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	/* Same two I/Os must be visible through the per-channel query; the
	 * iterate function is forwarded via the cb_arg of the channel callback. */
	g_count = 0;
	spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count);
	CU_ASSERT(g_status == 0);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev; both must fail. */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL);
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
3752 : :
/* Exercise COMPARE I/O in both backend modes.
 *
 * \param emulated When true, the stub bdev reports no native COMPARE support,
 * so the bdev layer is expected to emulate compare via a READ; when false the
 * COMPARE is expected to reach the backend unchanged. The expected I/O type
 * queued on the stub channel encodes that expectation.
 *
 * Covers success and miscompare for both the vectored (comparev) and the
 * single-buffer (compare) entry points. g_compare_read_buf supplies the
 * "on-disk" data the stub returns for the comparison.
 */
static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	/* Advertise native COMPARE support only in the non-emulated case. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	/* 1. successful comparev: stub "reads back" aa_buf, matching the iov. */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare comparev: stub returns bb_buf, which differs from aa_buf. */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 3. successful compare (single-buffer variant) */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare compare (single-buffer variant) */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	/* Restore the default stub and global state for subsequent tests. */
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3865 : :
/* Exercise COMPARE I/O on a bdev with metadata, in both backend modes.
 *
 * \param emulated Same meaning as in _bdev_compare(): true routes through
 * READ-based emulation, false expects a native COMPARE at the backend.
 *
 * Cases: success and miscompare with interleaved metadata (8-byte md inside
 * each 520-byte block), then success plus two miscompare flavors (md differs,
 * data differs) with a separate metadata buffer. Buffer sizes assume
 * 2 blocks of 512 bytes data + 8 bytes md, per the comments on the arrays.
 */
static void
_bdev_compare_with_md(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_miscompare[1024 /* 2 * blocklen */];
	char md_buf[16];
	char md_buf_miscompare[16];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(buf, 0xaa, sizeof(buf));
	memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare));
	/* make last md different */
	memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8);
	memset(buf_miscompare, 0xbb, sizeof(buf_miscompare));
	memset(md_buf, 0xaa, 16);
	memset(md_buf_miscompare, 0xbb, 16);

	/* Advertise native COMPARE support only in the non-emulated case. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 2;

	/* interleaved md & data: md lives in the tail of each block */
	bdev->md_interleave = true;
	bdev->md_len = 8;
	bdev->blocklen = 512 + 8;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = sizeof(buf);

	/* 1. successful compare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = sizeof(buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare with md interleaved: only the final md bytes differ,
	 * proving interleaved metadata participates in the comparison. */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_interleaved_miscompare;
	g_compare_read_buf_len = sizeof(buf_interleaved_miscompare);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Separate data & md buffers */
	bdev->md_interleave = false;
	bdev->blocklen = 512;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = 1024;

	/* 3. successful compare with md separated */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare with md separated where md buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_compare_md_buf = md_buf_miscompare;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 5. miscompare with md separated where buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_miscompare;
	g_compare_read_buf_len = sizeof(buf_miscompare);
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Reset metadata config and globals for subsequent tests. */
	bdev->md_len = 0;
	g_compare_md_buf = NULL;

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
4020 : :
4021 : : static void
4022 : 4 : bdev_compare(void)
4023 : : {
4024 : 4 : _bdev_compare(false);
4025 : 4 : _bdev_compare_with_md(false);
4026 : 4 : }
4027 : :
4028 : : static void
4029 : 4 : bdev_compare_emulated(void)
4030 : : {
4031 : 4 : _bdev_compare(true);
4032 : 4 : _bdev_compare_with_md(true);
4033 : 4 : }
4034 : :
/* Verify emulated compare-and-write: with native COMPARE disabled, the
 * operation is a locked READ (compare) followed by a WRITE on match, and the
 * WRITE is skipped on miscompare. poll_threads() calls drive the LBA range
 * lock/unlock steps that bracket the two-phase operation.
 */
static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	/* Force the emulated (read + write) compare-and-write path. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	/* Success case expects a READ (compare phase) then a WRITE. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	/* Only the compare phase finished; the overall op is still pending. */
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* The write phase must have carried the write_iov payload. */
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	/* Test miscompare: read phase returns cc_buf != aa_buf, so no WRITE
	 * is queued and none should be issued. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	/* Confirm the write phase was never submitted. */
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}
4141 : :
4142 : : static void
4143 : 4 : bdev_write_zeroes(void)
4144 : : {
4145 : : struct spdk_bdev *bdev;
4146 : 4 : struct spdk_bdev_desc *desc = NULL;
4147 : : struct spdk_io_channel *ioch;
4148 : : struct ut_expected_io *expected_io;
4149 : : uint64_t offset, num_io_blocks, num_blocks;
4150 : : uint32_t num_completed, num_requests;
4151 : : int rc;
4152 : :
4153 : 4 : ut_init_bdev(NULL);
4154 : 4 : bdev = allocate_bdev("bdev");
4155 : :
4156 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
4157 : 4 : CU_ASSERT_EQUAL(rc, 0);
4158 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4159 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4160 : 4 : ioch = spdk_bdev_get_io_channel(desc);
4161 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
4162 : :
4163 : 4 : fn_table.submit_request = stub_submit_request;
4164 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
4165 : :
4166 : : /* First test that if the bdev supports write_zeroes, the request won't be split */
4167 : 4 : bdev->md_len = 0;
4168 : 4 : bdev->blocklen = 4096;
4169 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4170 : :
4171 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
4172 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4173 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4174 : 4 : CU_ASSERT_EQUAL(rc, 0);
4175 : 4 : num_completed = stub_complete_io(1);
4176 : 4 : CU_ASSERT_EQUAL(num_completed, 1);
4177 : :
4178 : : /* Check that if write zeroes is not supported it'll be replaced by regular writes */
4179 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
4180 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4181 [ - + ]: 4 : num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
4182 : 4 : num_requests = 2;
4183 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
4184 : :
4185 [ + + ]: 12 : for (offset = 0; offset < num_requests; ++offset) {
4186 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4187 : 2 : offset * num_io_blocks, num_io_blocks, 0);
4188 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4189 : 2 : }
4190 : :
4191 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4192 : 4 : CU_ASSERT_EQUAL(rc, 0);
4193 : 4 : num_completed = stub_complete_io(num_requests);
4194 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4195 : :
4196 : : /* Check that the splitting is correct if bdev has interleaved metadata */
4197 : 4 : bdev->md_interleave = true;
4198 : 4 : bdev->md_len = 64;
4199 : 4 : bdev->blocklen = 4096 + 64;
4200 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4201 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4202 : :
4203 : 4 : num_requests = offset = 0;
4204 [ + + ]: 12 : while (offset < num_blocks) {
4205 [ + + + + : 8 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
- + ]
4206 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4207 : 2 : offset, num_io_blocks, 0);
4208 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4209 : 8 : offset += num_io_blocks;
4210 : 8 : num_requests++;
4211 : : }
4212 : :
4213 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4214 : 4 : CU_ASSERT_EQUAL(rc, 0);
4215 : 4 : num_completed = stub_complete_io(num_requests);
4216 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4217 : 4 : num_completed = stub_complete_io(num_requests);
4218 [ - + ]: 4 : assert(num_completed == 0);
4219 : :
4220 : : /* Check the the same for separate metadata buffer */
4221 : 4 : bdev->md_interleave = false;
4222 : 4 : bdev->md_len = 64;
4223 : 4 : bdev->blocklen = 4096;
4224 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4225 : :
4226 : 4 : num_requests = offset = 0;
4227 [ + + ]: 12 : while (offset < num_blocks) {
4228 [ + + + - : 8 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
- + ]
4229 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4230 : 2 : offset, num_io_blocks, 0);
4231 : 8 : expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
4232 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4233 : 8 : offset += num_io_blocks;
4234 : 8 : num_requests++;
4235 : : }
4236 : :
4237 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4238 : 4 : CU_ASSERT_EQUAL(rc, 0);
4239 : 4 : num_completed = stub_complete_io(num_requests);
4240 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4241 : :
4242 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
4243 : 4 : spdk_put_io_channel(ioch);
4244 : 4 : spdk_bdev_close(desc);
4245 : 4 : free_bdev(bdev);
4246 : 4 : ut_fini_bdev();
4247 : 4 : }
4248 : :
/* Verify the zcopy write flow: zcopy_start with populate=false hands back a
 * backend-owned buffer in the caller's iov, then zcopy_end with commit=true
 * flushes it. Sentinel values in the read-side globals prove the write path
 * never touches the read buffers.
 */
static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Poison the read-side globals; they must be untouched at the end. */
	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy read buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4330 : :
/* Verify the zcopy read flow: zcopy_start with populate=true returns the
 * backend buffer filled with data in the caller's iov, then zcopy_end with
 * commit=false releases it without writing. Mirror image of
 * bdev_zcopy_write(); sentinels in the write-side globals prove the read
 * path never touches the write buffers.
 */
static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Poison the write-side globals; they must be untouched at the end. */
	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy write buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4414 : :
4415 : : static void
4416 : 4 : bdev_open_while_hotremove(void)
4417 : : {
4418 : : struct spdk_bdev *bdev;
4419 : 4 : struct spdk_bdev_desc *desc[2] = {};
4420 : : int rc;
4421 : :
4422 : 4 : bdev = allocate_bdev("bdev");
4423 : :
4424 : 4 : rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
4425 : 4 : CU_ASSERT(rc == 0);
4426 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
4427 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));
4428 : :
4429 : 4 : spdk_bdev_unregister(bdev, NULL, NULL);
4430 : : /* Bdev unregister is handled asynchronously. Poll thread to complete. */
4431 : 4 : poll_threads();
4432 : :
4433 : 4 : rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
4434 : 4 : CU_ASSERT(rc == -ENODEV);
4435 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
4436 : :
4437 : 4 : spdk_bdev_close(desc[0]);
4438 : 4 : free_bdev(bdev);
4439 : 4 : }
4440 : :
4441 : : static void
4442 : 4 : bdev_close_while_hotremove(void)
4443 : : {
4444 : : struct spdk_bdev *bdev;
4445 : 4 : struct spdk_bdev_desc *desc = NULL;
4446 : 4 : int rc = 0;
4447 : :
4448 : 4 : bdev = allocate_bdev("bdev");
4449 : :
4450 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
4451 : 4 : CU_ASSERT_EQUAL(rc, 0);
4452 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4453 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4454 : :
4455 : : /* Simulate hot-unplug by unregistering bdev */
4456 : 4 : g_event_type1 = 0xFF;
4457 : 4 : g_unregister_arg = NULL;
4458 : 4 : g_unregister_rc = -1;
4459 : 4 : spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
4460 : : /* Close device while remove event is in flight */
4461 : 4 : spdk_bdev_close(desc);
4462 : :
4463 : : /* Ensure that unregister callback is delayed */
4464 : 4 : CU_ASSERT_EQUAL(g_unregister_arg, NULL);
4465 : 4 : CU_ASSERT_EQUAL(g_unregister_rc, -1);
4466 : :
4467 : 4 : poll_threads();
4468 : :
4469 : : /* Event callback shall not be issued because device was closed */
4470 : 4 : CU_ASSERT_EQUAL(g_event_type1, 0xFF);
4471 : : /* Unregister callback is issued */
4472 : 4 : CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
4473 : 4 : CU_ASSERT_EQUAL(g_unregister_rc, 0);
4474 : :
4475 : 4 : free_bdev(bdev);
4476 : 4 : }
4477 : :
4478 : : static void
4479 : 4 : bdev_open_ext_test(void)
4480 : : {
4481 : : struct spdk_bdev *bdev;
4482 : 4 : struct spdk_bdev_desc *desc1 = NULL;
4483 : 4 : struct spdk_bdev_desc *desc2 = NULL;
4484 : 4 : int rc = 0;
4485 : :
4486 : 4 : bdev = allocate_bdev("bdev");
4487 : :
4488 : 4 : rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
4489 : 4 : CU_ASSERT_EQUAL(rc, -EINVAL);
4490 : :
4491 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
4492 : 4 : CU_ASSERT_EQUAL(rc, 0);
4493 : :
4494 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
4495 : 4 : CU_ASSERT_EQUAL(rc, 0);
4496 : :
4497 : 4 : g_event_type1 = 0xFF;
4498 : 4 : g_event_type2 = 0xFF;
4499 : :
4500 : : /* Simulate hot-unplug by unregistering bdev */
4501 : 4 : spdk_bdev_unregister(bdev, NULL, NULL);
4502 : 4 : poll_threads();
4503 : :
4504 : : /* Check if correct events have been triggered in event callback fn */
4505 : 4 : CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
4506 : 4 : CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
4507 : :
4508 : 4 : free_bdev(bdev);
4509 : 4 : poll_threads();
4510 : 4 : }
4511 : :
4512 : : static void
4513 : 4 : bdev_open_ext_unregister(void)
4514 : : {
4515 : : struct spdk_bdev *bdev;
4516 : 4 : struct spdk_bdev_desc *desc1 = NULL;
4517 : 4 : struct spdk_bdev_desc *desc2 = NULL;
4518 : 4 : struct spdk_bdev_desc *desc3 = NULL;
4519 : 4 : struct spdk_bdev_desc *desc4 = NULL;
4520 : 4 : int rc = 0;
4521 : :
4522 : 4 : bdev = allocate_bdev("bdev");
4523 : :
4524 : 4 : rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
4525 : 4 : CU_ASSERT_EQUAL(rc, -EINVAL);
4526 : :
4527 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
4528 : 4 : CU_ASSERT_EQUAL(rc, 0);
4529 : :
4530 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
4531 : 4 : CU_ASSERT_EQUAL(rc, 0);
4532 : :
4533 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
4534 : 4 : CU_ASSERT_EQUAL(rc, 0);
4535 : :
4536 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
4537 : 4 : CU_ASSERT_EQUAL(rc, 0);
4538 : :
4539 : 4 : g_event_type1 = 0xFF;
4540 : 4 : g_event_type2 = 0xFF;
4541 : 4 : g_event_type3 = 0xFF;
4542 : 4 : g_event_type4 = 0xFF;
4543 : :
4544 : 4 : g_unregister_arg = NULL;
4545 : 4 : g_unregister_rc = -1;
4546 : :
4547 : : /* Simulate hot-unplug by unregistering bdev */
4548 : 4 : spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
4549 : :
4550 : : /*
4551 : : * Unregister is handled asynchronously and event callback
4552 : : * (i.e., above bdev_open_cbN) will be called.
4553 : : * For bdev_open_cb3 and bdev_open_cb4, it is intended to not
4554 : : * close the desc3 and desc4 so that the bdev is not closed.
4555 : : */
4556 : 4 : poll_threads();
4557 : :
4558 : : /* Check if correct events have been triggered in event callback fn */
4559 : 4 : CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
4560 : 4 : CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
4561 : 4 : CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
4562 : 4 : CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);
4563 : :
4564 : : /* Check that unregister callback is delayed */
4565 : 4 : CU_ASSERT(g_unregister_arg == NULL);
4566 : 4 : CU_ASSERT(g_unregister_rc == -1);
4567 : :
4568 : : /*
4569 : : * Explicitly close desc3. As desc4 is still opened there, the
4570 : : * unergister callback is still delayed to execute.
4571 : : */
4572 : 4 : spdk_bdev_close(desc3);
4573 : 4 : CU_ASSERT(g_unregister_arg == NULL);
4574 : 4 : CU_ASSERT(g_unregister_rc == -1);
4575 : :
4576 : : /*
4577 : : * Explicitly close desc4 to trigger the ongoing bdev unregister
4578 : : * operation after last desc is closed.
4579 : : */
4580 : 4 : spdk_bdev_close(desc4);
4581 : :
4582 : : /* Poll the thread for the async unregister operation */
4583 : 4 : poll_threads();
4584 : :
4585 : : /* Check that unregister callback is executed */
4586 : 4 : CU_ASSERT(g_unregister_arg == (void *)0x12345678);
4587 : 4 : CU_ASSERT(g_unregister_rc == 0);
4588 : :
4589 : 4 : free_bdev(bdev);
4590 : 4 : poll_threads();
4591 : 4 : }
4592 : :
4593 : : struct timeout_io_cb_arg {
4594 : : struct iovec iov;
4595 : : uint8_t type;
4596 : : };
4597 : :
4598 : : static int
4599 : 56 : bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
4600 : : {
4601 : : struct spdk_bdev_io *bdev_io;
4602 : 56 : int n = 0;
4603 : :
4604 [ - + ]: 56 : if (!ch) {
4605 : 0 : return -1;
4606 : : }
4607 : :
4608 [ + + ]: 116 : TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
4609 : 60 : n++;
4610 : 15 : }
4611 : :
4612 : 56 : return n;
4613 : 14 : }
4614 : :
4615 : : static void
4616 : 12 : bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
4617 : : {
4618 : 12 : struct timeout_io_cb_arg *ctx = cb_arg;
4619 : :
4620 : 12 : ctx->type = bdev_io->type;
4621 : 12 : ctx->iov.iov_base = bdev_io->iov.iov_base;
4622 : 12 : ctx->iov.iov_len = bdev_io->iov.iov_len;
4623 : 12 : }
4624 : :
4625 : : static void
4626 : 4 : bdev_set_io_timeout(void)
4627 : : {
4628 : : struct spdk_bdev *bdev;
4629 : 4 : struct spdk_bdev_desc *desc = NULL;
4630 : 4 : struct spdk_io_channel *io_ch = NULL;
4631 : 4 : struct spdk_bdev_channel *bdev_ch = NULL;
4632 : 3 : struct timeout_io_cb_arg cb_arg;
4633 : :
4634 : 4 : ut_init_bdev(NULL);
4635 : 4 : bdev = allocate_bdev("bdev");
4636 : :
4637 : 4 : CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
4638 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4639 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4640 : :
4641 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4642 : 4 : CU_ASSERT(io_ch != NULL);
4643 : :
4644 : 4 : bdev_ch = spdk_io_channel_get_ctx(io_ch);
4645 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
4646 : :
4647 : : /* This is the part1.
4648 : : * We will check the bdev_ch->io_submitted list
4649 : : * TO make sure that it can link IOs and only the user submitted IOs
4650 : : */
4651 : 4 : CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
4652 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
4653 : 4 : CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
4654 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
4655 : 4 : stub_complete_io(1);
4656 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
4657 : 4 : stub_complete_io(1);
4658 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
4659 : :
4660 : : /* Split IO */
4661 : 4 : bdev->optimal_io_boundary = 16;
4662 : 4 : bdev->split_on_optimal_io_boundary = true;
4663 : :
4664 : : /* Now test that a single-vector command is split correctly.
4665 : : * Offset 14, length 8, payload 0xF000
4666 : : * Child - Offset 14, length 2, payload 0xF000
4667 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
4668 : : *
4669 : : * Set up the expected values before calling spdk_bdev_read_blocks
4670 : : */
4671 : 4 : CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
4672 : : /* We count all submitted IOs including IO that are generated by splitting. */
4673 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
4674 : 4 : stub_complete_io(1);
4675 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
4676 : 4 : stub_complete_io(1);
4677 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
4678 : :
4679 : : /* Also include the reset IO */
4680 : 4 : CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
4681 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
4682 : 4 : poll_threads();
4683 : 4 : stub_complete_io(1);
4684 : 4 : poll_threads();
4685 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
4686 : :
4687 : : /* This is part2
4688 : : * Test the desc timeout poller register
4689 : : */
4690 : :
4691 : : /* Successfully set the timeout */
4692 : 4 : CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
4693 : 4 : CU_ASSERT(desc->io_timeout_poller != NULL);
4694 : 4 : CU_ASSERT(desc->timeout_in_sec == 30);
4695 : 4 : CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
4696 : 4 : CU_ASSERT(desc->cb_arg == &cb_arg);
4697 : :
4698 : : /* Change the timeout limit */
4699 : 4 : CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
4700 : 4 : CU_ASSERT(desc->io_timeout_poller != NULL);
4701 : 4 : CU_ASSERT(desc->timeout_in_sec == 20);
4702 : 4 : CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
4703 : 4 : CU_ASSERT(desc->cb_arg == &cb_arg);
4704 : :
4705 : : /* Disable the timeout */
4706 : 4 : CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
4707 : 4 : CU_ASSERT(desc->io_timeout_poller == NULL);
4708 : :
4709 : : /* This the part3
4710 : : * We will test to catch timeout IO and check whether the IO is
4711 : : * the submitted one.
4712 : : */
4713 [ - + ]: 4 : memset(&cb_arg, 0, sizeof(cb_arg));
4714 : 4 : CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
4715 : 4 : CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);
4716 : :
4717 : : /* Don't reach the limit */
4718 : 4 : spdk_delay_us(15 * spdk_get_ticks_hz());
4719 : 4 : poll_threads();
4720 : 4 : CU_ASSERT(cb_arg.type == 0);
4721 : 4 : CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
4722 : 4 : CU_ASSERT(cb_arg.iov.iov_len == 0);
4723 : :
4724 : : /* 15 + 15 = 30 reach the limit */
4725 : 4 : spdk_delay_us(15 * spdk_get_ticks_hz());
4726 : 4 : poll_threads();
4727 : 4 : CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
4728 : 4 : CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
4729 : 4 : CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
4730 : 4 : stub_complete_io(1);
4731 : :
4732 : : /* Use the same split IO above and check the IO */
4733 [ - + ]: 4 : memset(&cb_arg, 0, sizeof(cb_arg));
4734 : 4 : CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
4735 : :
4736 : : /* The first child complete in time */
4737 : 4 : spdk_delay_us(15 * spdk_get_ticks_hz());
4738 : 4 : poll_threads();
4739 : 4 : stub_complete_io(1);
4740 : 4 : CU_ASSERT(cb_arg.type == 0);
4741 : 4 : CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
4742 : 4 : CU_ASSERT(cb_arg.iov.iov_len == 0);
4743 : :
4744 : : /* The second child reach the limit */
4745 : 4 : spdk_delay_us(15 * spdk_get_ticks_hz());
4746 : 4 : poll_threads();
4747 : 4 : CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
4748 : 4 : CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
4749 : 4 : CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
4750 : 4 : stub_complete_io(1);
4751 : :
4752 : : /* Also include the reset IO */
4753 [ - + ]: 4 : memset(&cb_arg, 0, sizeof(cb_arg));
4754 : 4 : CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
4755 : 4 : spdk_delay_us(30 * spdk_get_ticks_hz());
4756 : 4 : poll_threads();
4757 : 4 : CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
4758 : 4 : stub_complete_io(1);
4759 : 4 : poll_threads();
4760 : :
4761 : 4 : spdk_put_io_channel(io_ch);
4762 : 4 : spdk_bdev_close(desc);
4763 : 4 : free_bdev(bdev);
4764 : 4 : ut_fini_bdev();
4765 : 4 : }
4766 : :
4767 : : static void
4768 : 4 : bdev_set_qd_sampling(void)
4769 : : {
4770 : : struct spdk_bdev *bdev;
4771 : 4 : struct spdk_bdev_desc *desc = NULL;
4772 : 4 : struct spdk_io_channel *io_ch = NULL;
4773 : 4 : struct spdk_bdev_channel *bdev_ch = NULL;
4774 : 3 : struct timeout_io_cb_arg cb_arg;
4775 : :
4776 : 4 : ut_init_bdev(NULL);
4777 : 4 : bdev = allocate_bdev("bdev");
4778 : :
4779 : 4 : CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
4780 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4781 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4782 : :
4783 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4784 : 4 : CU_ASSERT(io_ch != NULL);
4785 : :
4786 : 4 : bdev_ch = spdk_io_channel_get_ctx(io_ch);
4787 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
4788 : :
4789 : : /* This is the part1.
4790 : : * We will check the bdev_ch->io_submitted list
4791 : : * TO make sure that it can link IOs and only the user submitted IOs
4792 : : */
4793 : 4 : CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
4794 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
4795 : 4 : CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
4796 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
4797 : 4 : stub_complete_io(1);
4798 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
4799 : 4 : stub_complete_io(1);
4800 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
4801 : :
4802 : : /* This is the part2.
4803 : : * Test the bdev's qd poller register
4804 : : */
4805 : : /* 1st Successfully set the qd sampling period */
4806 : 4 : spdk_bdev_set_qd_sampling_period(bdev, 10);
4807 : 4 : CU_ASSERT(bdev->internal.new_period == 10);
4808 : 4 : CU_ASSERT(bdev->internal.period == 10);
4809 : 4 : CU_ASSERT(bdev->internal.qd_desc != NULL);
4810 : 4 : poll_threads();
4811 : 4 : CU_ASSERT(bdev->internal.qd_poller != NULL);
4812 : :
4813 : : /* 2nd Change the qd sampling period */
4814 : 4 : spdk_bdev_set_qd_sampling_period(bdev, 20);
4815 : 4 : CU_ASSERT(bdev->internal.new_period == 20);
4816 : 4 : CU_ASSERT(bdev->internal.period == 10);
4817 : 4 : CU_ASSERT(bdev->internal.qd_desc != NULL);
4818 : 4 : poll_threads();
4819 : 4 : CU_ASSERT(bdev->internal.qd_poller != NULL);
4820 : 4 : CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
4821 : :
4822 : : /* 3rd Change the qd sampling period and verify qd_poll_in_progress */
4823 : 4 : spdk_delay_us(20);
4824 : 4 : poll_thread_times(0, 1);
4825 [ - + ]: 4 : CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
4826 : 4 : spdk_bdev_set_qd_sampling_period(bdev, 30);
4827 : 4 : CU_ASSERT(bdev->internal.new_period == 30);
4828 : 4 : CU_ASSERT(bdev->internal.period == 20);
4829 : 4 : poll_threads();
4830 [ - + ]: 4 : CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
4831 : 4 : CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
4832 : :
4833 : : /* 4th Disable the qd sampling period */
4834 : 4 : spdk_bdev_set_qd_sampling_period(bdev, 0);
4835 : 4 : CU_ASSERT(bdev->internal.new_period == 0);
4836 : 4 : CU_ASSERT(bdev->internal.period == 30);
4837 : 4 : poll_threads();
4838 : 4 : CU_ASSERT(bdev->internal.qd_poller == NULL);
4839 : 4 : CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
4840 : 4 : CU_ASSERT(bdev->internal.qd_desc == NULL);
4841 : :
4842 : : /* This is the part3.
4843 : : * We will test the submitted IO and reset works
4844 : : * properly with the qd sampling.
4845 : : */
4846 [ - + ]: 4 : memset(&cb_arg, 0, sizeof(cb_arg));
4847 : 4 : spdk_bdev_set_qd_sampling_period(bdev, 1);
4848 : 4 : poll_threads();
4849 : :
4850 : 4 : CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
4851 : 4 : CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
4852 : :
4853 : : /* Also include the reset IO */
4854 [ - + ]: 4 : memset(&cb_arg, 0, sizeof(cb_arg));
4855 : 4 : CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
4856 : 4 : poll_threads();
4857 : :
4858 : : /* Close the desc */
4859 : 4 : spdk_put_io_channel(io_ch);
4860 : 4 : spdk_bdev_close(desc);
4861 : :
4862 : : /* Complete the submitted IO and reset */
4863 : 4 : stub_complete_io(2);
4864 : 4 : poll_threads();
4865 : :
4866 : 4 : free_bdev(bdev);
4867 : 4 : ut_fini_bdev();
4868 : 4 : }
4869 : :
4870 : : static void
4871 : 4 : lba_range_overlap(void)
4872 : : {
4873 : 3 : struct lba_range r1, r2;
4874 : :
4875 : 4 : r1.offset = 100;
4876 : 4 : r1.length = 50;
4877 : :
4878 : 4 : r2.offset = 0;
4879 : 4 : r2.length = 1;
4880 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4881 : :
4882 : 4 : r2.offset = 0;
4883 : 4 : r2.length = 100;
4884 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4885 : :
4886 : 4 : r2.offset = 0;
4887 : 4 : r2.length = 110;
4888 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4889 : :
4890 : 4 : r2.offset = 100;
4891 : 4 : r2.length = 10;
4892 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4893 : :
4894 : 4 : r2.offset = 110;
4895 : 4 : r2.length = 20;
4896 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4897 : :
4898 : 4 : r2.offset = 140;
4899 : 4 : r2.length = 150;
4900 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4901 : :
4902 : 4 : r2.offset = 130;
4903 : 4 : r2.length = 200;
4904 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4905 : :
4906 : 4 : r2.offset = 150;
4907 : 4 : r2.length = 100;
4908 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4909 : :
4910 : 4 : r2.offset = 110;
4911 : 4 : r2.length = 0;
4912 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4913 : 4 : }
4914 : :
4915 : : static bool g_lock_lba_range_done;
4916 : : static bool g_unlock_lba_range_done;
4917 : :
4918 : : static void
4919 : 32 : lock_lba_range_done(struct lba_range *range, void *ctx, int status)
4920 : : {
4921 : 32 : g_lock_lba_range_done = true;
4922 : 32 : }
4923 : :
4924 : : static void
4925 : 24 : unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
4926 : : {
4927 : 24 : g_unlock_lba_range_done = true;
4928 : 24 : }
4929 : :
4930 : : static void
4931 : 4 : lock_lba_range_check_ranges(void)
4932 : : {
4933 : : struct spdk_bdev *bdev;
4934 : 4 : struct spdk_bdev_desc *desc = NULL;
4935 : : struct spdk_io_channel *io_ch;
4936 : : struct spdk_bdev_channel *channel;
4937 : : struct lba_range *range;
4938 : 3 : int ctx1;
4939 : : int rc;
4940 : :
4941 : 4 : ut_init_bdev(NULL);
4942 : 4 : bdev = allocate_bdev("bdev0");
4943 : :
4944 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4945 : 4 : CU_ASSERT(rc == 0);
4946 : 4 : CU_ASSERT(desc != NULL);
4947 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4948 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4949 : 4 : CU_ASSERT(io_ch != NULL);
4950 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
4951 : :
4952 : 4 : g_lock_lba_range_done = false;
4953 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4954 : 4 : CU_ASSERT(rc == 0);
4955 : 4 : poll_threads();
4956 : :
4957 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
4958 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
4959 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
4960 : 4 : CU_ASSERT(range->offset == 20);
4961 : 4 : CU_ASSERT(range->length == 10);
4962 : 4 : CU_ASSERT(range->owner_ch == channel);
4963 : :
4964 : : /* Unlocks must exactly match a lock. */
4965 : 4 : g_unlock_lba_range_done = false;
4966 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
4967 : 4 : CU_ASSERT(rc == -EINVAL);
4968 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == false);
4969 : :
4970 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
4971 : 4 : CU_ASSERT(rc == 0);
4972 : 4 : spdk_delay_us(100);
4973 : 4 : poll_threads();
4974 : :
4975 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
4976 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
4977 : :
4978 : 4 : spdk_put_io_channel(io_ch);
4979 : 4 : spdk_bdev_close(desc);
4980 : 4 : free_bdev(bdev);
4981 : 4 : ut_fini_bdev();
4982 : 4 : }
4983 : :
4984 : : static void
4985 : 4 : lock_lba_range_with_io_outstanding(void)
4986 : : {
4987 : : struct spdk_bdev *bdev;
4988 : 4 : struct spdk_bdev_desc *desc = NULL;
4989 : : struct spdk_io_channel *io_ch;
4990 : : struct spdk_bdev_channel *channel;
4991 : : struct lba_range *range;
4992 : 3 : char buf[4096];
4993 : 3 : int ctx1;
4994 : : int rc;
4995 : :
4996 : 4 : ut_init_bdev(NULL);
4997 : 4 : bdev = allocate_bdev("bdev0");
4998 : :
4999 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5000 : 4 : CU_ASSERT(rc == 0);
5001 : 4 : CU_ASSERT(desc != NULL);
5002 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5003 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5004 : 4 : CU_ASSERT(io_ch != NULL);
5005 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5006 : :
5007 : 4 : g_io_done = false;
5008 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
5009 : 4 : CU_ASSERT(rc == 0);
5010 : :
5011 : 4 : g_lock_lba_range_done = false;
5012 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5013 : 4 : CU_ASSERT(rc == 0);
5014 : 4 : poll_threads();
5015 : :
5016 : : /* The lock should immediately become valid, since there are no outstanding
5017 : : * write I/O.
5018 : : */
5019 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5020 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5021 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5022 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5023 : 4 : CU_ASSERT(range->offset == 20);
5024 : 4 : CU_ASSERT(range->length == 10);
5025 : 4 : CU_ASSERT(range->owner_ch == channel);
5026 : 4 : CU_ASSERT(range->locked_ctx == &ctx1);
5027 : :
5028 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5029 : 4 : CU_ASSERT(rc == 0);
5030 : 4 : stub_complete_io(1);
5031 : 4 : spdk_delay_us(100);
5032 : 4 : poll_threads();
5033 : :
5034 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5035 : :
5036 : : /* Now try again, but with a write I/O. */
5037 : 4 : g_io_done = false;
5038 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
5039 : 4 : CU_ASSERT(rc == 0);
5040 : :
5041 : 4 : g_lock_lba_range_done = false;
5042 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5043 : 4 : CU_ASSERT(rc == 0);
5044 : 4 : poll_threads();
5045 : :
5046 : : /* The lock should not be fully valid yet, since a write I/O is outstanding.
5047 : : * But note that the range should be on the channel's locked_list, to make sure no
5048 : : * new write I/O are started.
5049 : : */
5050 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5051 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5052 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5053 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5054 : 4 : CU_ASSERT(range->offset == 20);
5055 : 4 : CU_ASSERT(range->length == 10);
5056 : :
5057 : : /* Complete the write I/O. This should make the lock valid (checked by confirming
5058 : : * our callback was invoked).
5059 : : */
5060 : 4 : stub_complete_io(1);
5061 : 4 : spdk_delay_us(100);
5062 : 4 : poll_threads();
5063 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5064 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5065 : :
5066 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
5067 : 4 : CU_ASSERT(rc == 0);
5068 : 4 : poll_threads();
5069 : :
5070 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5071 : :
5072 : 4 : spdk_put_io_channel(io_ch);
5073 : 4 : spdk_bdev_close(desc);
5074 : 4 : free_bdev(bdev);
5075 : 4 : ut_fini_bdev();
5076 : 4 : }
5077 : :
5078 : : static void
5079 : 4 : lock_lba_range_overlapped(void)
5080 : : {
5081 : : struct spdk_bdev *bdev;
5082 : 4 : struct spdk_bdev_desc *desc = NULL;
5083 : : struct spdk_io_channel *io_ch;
5084 : : struct spdk_bdev_channel *channel;
5085 : : struct lba_range *range;
5086 : 3 : int ctx1;
5087 : : int rc;
5088 : :
5089 : 4 : ut_init_bdev(NULL);
5090 : 4 : bdev = allocate_bdev("bdev0");
5091 : :
5092 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5093 : 4 : CU_ASSERT(rc == 0);
5094 : 4 : CU_ASSERT(desc != NULL);
5095 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5096 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5097 : 4 : CU_ASSERT(io_ch != NULL);
5098 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5099 : :
5100 : : /* Lock range 20-29. */
5101 : 4 : g_lock_lba_range_done = false;
5102 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5103 : 4 : CU_ASSERT(rc == 0);
5104 : 4 : poll_threads();
5105 : :
5106 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5107 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5108 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5109 : 4 : CU_ASSERT(range->offset == 20);
5110 : 4 : CU_ASSERT(range->length == 10);
5111 : :
5112 : : /* Try to lock range 25-39. It should not lock immediately, since it overlaps with
5113 : : * 20-29.
5114 : : */
5115 : 4 : g_lock_lba_range_done = false;
5116 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
5117 : 4 : CU_ASSERT(rc == 0);
5118 : 4 : poll_threads();
5119 : :
5120 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5121 : 4 : range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
5122 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5123 : 4 : CU_ASSERT(range->offset == 25);
5124 : 4 : CU_ASSERT(range->length == 15);
5125 : :
5126 : : /* Unlock 20-29. This should result in range 25-39 now getting locked since it
5127 : : * no longer overlaps with an active lock.
5128 : : */
5129 : 4 : g_unlock_lba_range_done = false;
5130 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
5131 : 4 : CU_ASSERT(rc == 0);
5132 : 4 : poll_threads();
5133 : :
5134 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5135 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
5136 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5137 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5138 : 4 : CU_ASSERT(range->offset == 25);
5139 : 4 : CU_ASSERT(range->length == 15);
5140 : :
5141 : : /* Lock 40-59. This should immediately lock since it does not overlap with the
5142 : : * currently active 25-39 lock.
5143 : : */
5144 : 4 : g_lock_lba_range_done = false;
5145 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
5146 : 4 : CU_ASSERT(rc == 0);
5147 : 4 : poll_threads();
5148 : :
5149 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5150 : 4 : range = TAILQ_FIRST(&bdev->internal.locked_ranges);
5151 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5152 : 4 : range = TAILQ_NEXT(range, tailq);
5153 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5154 : 4 : CU_ASSERT(range->offset == 40);
5155 : 4 : CU_ASSERT(range->length == 20);
5156 : :
5157 : : /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
5158 : 4 : g_lock_lba_range_done = false;
5159 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
5160 : 4 : CU_ASSERT(rc == 0);
5161 : 4 : poll_threads();
5162 : :
5163 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5164 : 4 : range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
5165 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5166 : 4 : CU_ASSERT(range->offset == 35);
5167 : 4 : CU_ASSERT(range->length == 10);
5168 : :
5169 : : /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
5170 : : * the 40-59 lock is still active.
5171 : : */
5172 : 4 : g_unlock_lba_range_done = false;
5173 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
5174 : 4 : CU_ASSERT(rc == 0);
5175 : 4 : poll_threads();
5176 : :
5177 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5178 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5179 : 4 : range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
5180 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5181 : 4 : CU_ASSERT(range->offset == 35);
5182 : 4 : CU_ASSERT(range->length == 10);
5183 : :
5184 : : /* Unlock 40-59. This should result in 35-44 now getting locked, since there are
5185 : : * no longer any active overlapping locks.
5186 : : */
5187 : 4 : g_unlock_lba_range_done = false;
5188 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
5189 : 4 : CU_ASSERT(rc == 0);
5190 : 4 : poll_threads();
5191 : :
5192 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5193 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5194 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
5195 : 4 : range = TAILQ_FIRST(&bdev->internal.locked_ranges);
5196 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5197 : 4 : CU_ASSERT(range->offset == 35);
5198 : 4 : CU_ASSERT(range->length == 10);
5199 : :
5200 : : /* Finally, unlock 35-44. */
5201 : 4 : g_unlock_lba_range_done = false;
5202 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
5203 : 4 : CU_ASSERT(rc == 0);
5204 : 4 : poll_threads();
5205 : :
5206 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5207 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));
5208 : :
5209 : 4 : spdk_put_io_channel(io_ch);
5210 : 4 : spdk_bdev_close(desc);
5211 : 4 : free_bdev(bdev);
5212 : 4 : ut_fini_bdev();
5213 : 4 : }
5214 : :
5215 : : static void
5216 : 12 : bdev_quiesce_done(void *ctx, int status)
5217 : : {
5218 : 12 : g_lock_lba_range_done = true;
5219 : 12 : }
5220 : :
5221 : : static void
5222 : 16 : bdev_unquiesce_done(void *ctx, int status)
5223 : : {
5224 : 16 : g_unlock_lba_range_done = true;
5225 : 16 : }
5226 : :
5227 : : static void
5228 : 4 : bdev_quiesce_done_unquiesce(void *ctx, int status)
5229 : : {
5230 : 4 : struct spdk_bdev *bdev = ctx;
5231 : : int rc;
5232 : :
5233 : 4 : g_lock_lba_range_done = true;
5234 : :
5235 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL);
5236 : 4 : CU_ASSERT(rc == 0);
5237 : 4 : }
5238 : :
5239 : : static void
5240 : 4 : bdev_quiesce(void)
5241 : : {
5242 : : struct spdk_bdev *bdev;
5243 : 4 : struct spdk_bdev_desc *desc = NULL;
5244 : : struct spdk_io_channel *io_ch;
5245 : : struct spdk_bdev_channel *channel;
5246 : : struct lba_range *range;
5247 : : struct spdk_bdev_io *bdev_io;
5248 : 3 : int ctx1;
5249 : : int rc;
5250 : :
5251 : 4 : ut_init_bdev(NULL);
5252 : 4 : bdev = allocate_bdev("bdev0");
5253 : :
5254 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5255 : 4 : CU_ASSERT(rc == 0);
5256 : 4 : CU_ASSERT(desc != NULL);
5257 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5258 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5259 : 4 : CU_ASSERT(io_ch != NULL);
5260 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5261 : :
5262 : 4 : g_lock_lba_range_done = false;
5263 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5264 : 4 : CU_ASSERT(rc == 0);
5265 : 4 : poll_threads();
5266 : :
5267 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5268 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5269 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5270 : 4 : CU_ASSERT(range->offset == 0);
5271 : 4 : CU_ASSERT(range->length == bdev->blockcnt);
5272 : 4 : CU_ASSERT(range->owner_ch == NULL);
5273 : 4 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5274 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5275 : 4 : CU_ASSERT(range->offset == 0);
5276 : 4 : CU_ASSERT(range->length == bdev->blockcnt);
5277 : 4 : CU_ASSERT(range->owner_ch == NULL);
5278 : :
5279 : 4 : g_unlock_lba_range_done = false;
5280 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5281 : 4 : CU_ASSERT(rc == 0);
5282 : 4 : spdk_delay_us(100);
5283 : 4 : poll_threads();
5284 : :
5285 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5286 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5287 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5288 : :
5289 : 4 : g_lock_lba_range_done = false;
5290 : 4 : rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1);
5291 : 4 : CU_ASSERT(rc == 0);
5292 : 4 : poll_threads();
5293 : :
5294 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5295 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5296 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5297 : 4 : CU_ASSERT(range->offset == 20);
5298 : 4 : CU_ASSERT(range->length == 10);
5299 : 4 : CU_ASSERT(range->owner_ch == NULL);
5300 : 4 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5301 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5302 : 4 : CU_ASSERT(range->offset == 20);
5303 : 4 : CU_ASSERT(range->length == 10);
5304 : 4 : CU_ASSERT(range->owner_ch == NULL);
5305 : :
5306 : : /* Unlocks must exactly match a lock. */
5307 : 4 : g_unlock_lba_range_done = false;
5308 : 4 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1);
5309 : 4 : CU_ASSERT(rc == -EINVAL);
5310 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == false);
5311 : :
5312 : 4 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1);
5313 : 4 : CU_ASSERT(rc == 0);
5314 : 4 : spdk_delay_us(100);
5315 : 4 : poll_threads();
5316 : :
5317 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5318 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5319 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5320 : :
5321 : : /* Test unquiesce from quiesce cb */
5322 : 4 : g_lock_lba_range_done = false;
5323 : 4 : g_unlock_lba_range_done = false;
5324 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev);
5325 : 4 : CU_ASSERT(rc == 0);
5326 : 4 : poll_threads();
5327 : :
5328 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5329 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5330 : :
5331 : : /* Test quiesce with read I/O */
5332 : 4 : g_lock_lba_range_done = false;
5333 : 4 : g_unlock_lba_range_done = false;
5334 : 4 : g_io_done = false;
5335 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5336 : 4 : CU_ASSERT(rc == 0);
5337 : :
5338 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5339 : 4 : CU_ASSERT(rc == 0);
5340 : 4 : poll_threads();
5341 : :
5342 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5343 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5344 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5345 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5346 : :
5347 : 4 : stub_complete_io(1);
5348 : 4 : spdk_delay_us(100);
5349 : 4 : poll_threads();
5350 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5351 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5352 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5353 : :
5354 : 4 : g_io_done = false;
5355 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5356 : 4 : CU_ASSERT(rc == 0);
5357 : :
5358 : 4 : bdev_io = TAILQ_FIRST(&channel->io_locked);
5359 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
5360 : 4 : CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20);
5361 : 4 : CU_ASSERT(bdev_io->u.bdev.num_blocks == 1);
5362 : :
5363 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5364 : 4 : CU_ASSERT(rc == 0);
5365 : 4 : spdk_delay_us(100);
5366 : 4 : poll_threads();
5367 : :
5368 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5369 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5370 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5371 : :
5372 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5373 : 4 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
5374 : 4 : poll_threads();
5375 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5376 : :
5377 : 4 : spdk_put_io_channel(io_ch);
5378 : 4 : spdk_bdev_close(desc);
5379 : 4 : free_bdev(bdev);
5380 : 4 : ut_fini_bdev();
5381 : 4 : }
5382 : :
5383 : : static void
5384 : 36 : abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5385 : : {
5386 : 36 : g_abort_done = true;
5387 : 36 : g_abort_status = bdev_io->internal.status;
5388 : 36 : spdk_bdev_free_io(bdev_io);
5389 : 36 : }
5390 : :
5391 : : static void
5392 : 4 : bdev_io_abort(void)
5393 : : {
5394 : : struct spdk_bdev *bdev;
5395 : 4 : struct spdk_bdev_desc *desc = NULL;
5396 : : struct spdk_io_channel *io_ch;
5397 : : struct spdk_bdev_channel *channel;
5398 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
5399 : 4 : struct spdk_bdev_opts bdev_opts = {};
5400 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
5401 : 4 : uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
5402 : : int rc;
5403 : :
5404 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5405 : 4 : bdev_opts.bdev_io_pool_size = 7;
5406 : 4 : bdev_opts.bdev_io_cache_size = 2;
5407 : 4 : ut_init_bdev(&bdev_opts);
5408 : :
5409 : 4 : bdev = allocate_bdev("bdev0");
5410 : :
5411 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5412 : 4 : CU_ASSERT(rc == 0);
5413 : 4 : CU_ASSERT(desc != NULL);
5414 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5415 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5416 : 4 : CU_ASSERT(io_ch != NULL);
5417 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5418 : 4 : mgmt_ch = channel->shared_resource->mgmt_ch;
5419 : :
5420 : 4 : g_abort_done = false;
5421 : :
5422 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
5423 : :
5424 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5425 : 4 : CU_ASSERT(rc == -ENOTSUP);
5426 : :
5427 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);
5428 : :
5429 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
5430 : 4 : CU_ASSERT(rc == 0);
5431 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5432 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
5433 : :
5434 : : /* Test the case that the target I/O was successfully aborted. */
5435 : 4 : g_io_done = false;
5436 : :
5437 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5438 : 4 : CU_ASSERT(rc == 0);
5439 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5440 : :
5441 : 4 : g_abort_done = false;
5442 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5443 : :
5444 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5445 : 4 : CU_ASSERT(rc == 0);
5446 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5447 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5448 : 4 : stub_complete_io(1);
5449 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5450 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5451 : :
5452 : : /* Test the case that the target I/O was not aborted because it completed
5453 : : * in the middle of execution of the abort.
5454 : : */
5455 : 4 : g_io_done = false;
5456 : :
5457 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
5458 : 4 : CU_ASSERT(rc == 0);
5459 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5460 : :
5461 : 4 : g_abort_done = false;
5462 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5463 : :
5464 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5465 : 4 : CU_ASSERT(rc == 0);
5466 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5467 : :
5468 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5469 : 4 : stub_complete_io(1);
5470 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5471 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5472 : :
5473 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
5474 : 4 : stub_complete_io(1);
5475 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5476 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5477 : :
5478 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5479 : :
5480 : 4 : bdev->optimal_io_boundary = 16;
5481 : 4 : bdev->split_on_optimal_io_boundary = true;
5482 : :
5483 : : /* Test that a single-vector command which is split is aborted correctly.
5484 : : * Offset 14, length 8, payload 0xF000
5485 : : * Child - Offset 14, length 2, payload 0xF000
5486 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
5487 : : */
5488 : 4 : g_io_done = false;
5489 : :
5490 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
5491 : 4 : CU_ASSERT(rc == 0);
5492 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5493 : :
5494 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5495 : :
5496 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5497 : :
5498 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5499 : 4 : CU_ASSERT(rc == 0);
5500 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5501 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5502 : 4 : stub_complete_io(2);
5503 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5504 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5505 : :
5506 : : /* Test that a multi-vector command that needs to be split by strip and then
5507 : : * needs to be split is aborted correctly. Abort is requested before the second
5508 : : * child I/O was submitted. The parent I/O should complete with failure without
5509 : : * submitting the second child I/O.
5510 : : */
5511 [ + + ]: 260 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
5512 : 256 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
5513 : 256 : iov[i].iov_len = 512;
5514 : 64 : }
5515 : :
5516 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
5517 : 4 : g_io_done = false;
5518 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
5519 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
5520 : 4 : CU_ASSERT(rc == 0);
5521 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5522 : :
5523 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5524 : :
5525 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5526 : :
5527 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5528 : 4 : CU_ASSERT(rc == 0);
5529 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5530 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5531 : 4 : stub_complete_io(1);
5532 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5533 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5534 : :
5535 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5536 : :
5537 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5538 : :
5539 : 4 : bdev->optimal_io_boundary = 16;
5540 : 4 : g_io_done = false;
5541 : :
5542 : : /* Test that a single-vector command which is split is aborted correctly.
5543 : : * Differently from the above, the child abort request will be submitted
5544 : : * sequentially due to the capacity of spdk_bdev_io.
5545 : : */
5546 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
5547 : 4 : CU_ASSERT(rc == 0);
5548 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5549 : :
5550 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5551 : :
5552 : 4 : g_abort_done = false;
5553 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5554 : :
5555 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5556 : 4 : CU_ASSERT(rc == 0);
5557 : 4 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
5558 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
5559 : :
5560 : 4 : stub_complete_io(1);
5561 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5562 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5563 : 4 : stub_complete_io(3);
5564 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5565 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5566 : :
5567 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5568 : :
5569 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5570 : :
5571 : 4 : bdev->split_on_optimal_io_boundary = false;
5572 : 4 : bdev->split_on_write_unit = true;
5573 : 4 : bdev->write_unit_size = 16;
5574 : :
5575 : : /* Test that a single-vector command which is split is aborted correctly.
5576 : : * Offset 16, length 32, payload 0xF000
5577 : : * Child - Offset 16, length 16, payload 0xF000
5578 : : * Child - Offset 32, length 16, payload 0xF000 + 16 * 512
5579 : : *
5580 : : * Use bdev->split_on_write_unit as a split condition.
5581 : : */
5582 : 4 : g_io_done = false;
5583 : :
5584 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 16, 32, io_done, &io_ctx1);
5585 : 4 : CU_ASSERT(rc == 0);
5586 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5587 : :
5588 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5589 : :
5590 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5591 : :
5592 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5593 : 4 : CU_ASSERT(rc == 0);
5594 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5595 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5596 : 4 : stub_complete_io(2);
5597 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5598 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5599 : :
5600 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5601 : :
5602 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5603 : :
5604 : 4 : bdev->split_on_write_unit = false;
5605 : 4 : bdev->max_rw_size = 16;
5606 : :
5607 : : /* Test that a single-vector command which is split is aborted correctly.
5608 : : * Use bdev->max_rw_size as a split condition.
5609 : : */
5610 : 4 : g_io_done = false;
5611 : :
5612 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1);
5613 : 4 : CU_ASSERT(rc == 0);
5614 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5615 : :
5616 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5617 : :
5618 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5619 : :
5620 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5621 : 4 : CU_ASSERT(rc == 0);
5622 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5623 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5624 : 4 : stub_complete_io(2);
5625 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5626 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5627 : :
5628 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5629 : :
5630 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5631 : :
5632 : 4 : bdev->max_rw_size = 0;
5633 : 4 : bdev->max_segment_size = 512 * 16;
5634 : 4 : bdev->max_num_segments = 1;
5635 : :
5636 : : /* Test that a single-vector command which is split is aborted correctly.
5637 : : * Use bdev->max_segment_size and bdev->max_num_segments together as split conditions.
5638 : : *
5639 : : * One single-vector command is changed to one two-vectors command, but
5640 : : * bdev->max_num_segments is 1 and it is split into two single-vector commands.
5641 : : */
5642 : 4 : g_io_done = false;
5643 : :
5644 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1);
5645 : 4 : CU_ASSERT(rc == 0);
5646 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5647 : :
5648 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5649 : :
5650 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5651 : :
5652 : 4 : rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
5653 : 4 : CU_ASSERT(rc == 0);
5654 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5655 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
5656 : 4 : stub_complete_io(2);
5657 [ - + ]: 4 : CU_ASSERT(g_abort_done == true);
5658 : 4 : CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
5659 : :
5660 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5661 : :
5662 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5663 : :
5664 : 4 : spdk_put_io_channel(io_ch);
5665 : 4 : spdk_bdev_close(desc);
5666 : 4 : free_bdev(bdev);
5667 : 4 : ut_fini_bdev();
5668 : 4 : }
5669 : :
5670 : : static void
5671 : 4 : bdev_unmap(void)
5672 : : {
5673 : : struct spdk_bdev *bdev;
5674 : 4 : struct spdk_bdev_desc *desc = NULL;
5675 : : struct spdk_io_channel *ioch;
5676 : : struct spdk_bdev_channel *bdev_ch;
5677 : : struct ut_expected_io *expected_io;
5678 : 4 : struct spdk_bdev_opts bdev_opts = {};
5679 : : uint32_t i, num_outstanding;
5680 : : uint64_t offset, num_blocks, max_unmap_blocks, num_children;
5681 : : int rc;
5682 : :
5683 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5684 : 4 : bdev_opts.bdev_io_pool_size = 512;
5685 : 4 : bdev_opts.bdev_io_cache_size = 64;
5686 : 4 : ut_init_bdev(&bdev_opts);
5687 : :
5688 : 4 : bdev = allocate_bdev("bdev");
5689 : :
5690 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5691 : 4 : CU_ASSERT_EQUAL(rc, 0);
5692 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5693 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5694 : 4 : ioch = spdk_bdev_get_io_channel(desc);
5695 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
5696 : 4 : bdev_ch = spdk_io_channel_get_ctx(ioch);
5697 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5698 : :
5699 : 4 : fn_table.submit_request = stub_submit_request;
5700 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5701 : :
5702 : : /* Case 1: First test the request won't be split */
5703 : 4 : num_blocks = 32;
5704 : :
5705 : 4 : g_io_done = false;
5706 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
5707 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5708 : 4 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5709 : 4 : CU_ASSERT_EQUAL(rc, 0);
5710 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5711 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5712 : 4 : stub_complete_io(1);
5713 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5714 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5715 : :
5716 : : /* Case 2: Test the split with 2 children requests */
5717 : 4 : bdev->max_unmap = 8;
5718 : 4 : bdev->max_unmap_segments = 2;
5719 : 4 : max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
5720 : 4 : num_blocks = max_unmap_blocks * 2;
5721 : 4 : offset = 0;
5722 : :
5723 : 4 : g_io_done = false;
5724 [ + + ]: 12 : for (i = 0; i < 2; i++) {
5725 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
5726 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5727 : 8 : offset += max_unmap_blocks;
5728 : 2 : }
5729 : :
5730 : 4 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5731 : 4 : CU_ASSERT_EQUAL(rc, 0);
5732 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5733 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5734 : 4 : stub_complete_io(2);
5735 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5736 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5737 : :
5738 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5739 : 4 : num_children = 15;
5740 : 4 : num_blocks = max_unmap_blocks * num_children;
5741 : 4 : g_io_done = false;
5742 : 4 : offset = 0;
5743 [ + + ]: 64 : for (i = 0; i < num_children; i++) {
5744 : 60 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
5745 : 60 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5746 : 60 : offset += max_unmap_blocks;
5747 : 15 : }
5748 : :
5749 : 4 : rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5750 : 4 : CU_ASSERT_EQUAL(rc, 0);
5751 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5752 : :
5753 [ + + ]: 12 : while (num_children > 0) {
5754 [ + + ]: 8 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5755 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5756 : 8 : stub_complete_io(num_outstanding);
5757 : 8 : num_children -= num_outstanding;
5758 : : }
5759 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5760 : :
5761 : 4 : spdk_put_io_channel(ioch);
5762 : 4 : spdk_bdev_close(desc);
5763 : 4 : free_bdev(bdev);
5764 : 4 : ut_fini_bdev();
5765 : 4 : }
5766 : :
5767 : : static void
5768 : 4 : bdev_write_zeroes_split_test(void)
5769 : : {
5770 : : struct spdk_bdev *bdev;
5771 : 4 : struct spdk_bdev_desc *desc = NULL;
5772 : : struct spdk_io_channel *ioch;
5773 : : struct spdk_bdev_channel *bdev_ch;
5774 : : struct ut_expected_io *expected_io;
5775 : 4 : struct spdk_bdev_opts bdev_opts = {};
5776 : : uint32_t i, num_outstanding;
5777 : : uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
5778 : : int rc;
5779 : :
5780 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
5781 : 4 : bdev_opts.bdev_io_pool_size = 512;
5782 : 4 : bdev_opts.bdev_io_cache_size = 64;
5783 : 4 : ut_init_bdev(&bdev_opts);
5784 : :
5785 : 4 : bdev = allocate_bdev("bdev");
5786 : :
5787 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
5788 : 4 : CU_ASSERT_EQUAL(rc, 0);
5789 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5790 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5791 : 4 : ioch = spdk_bdev_get_io_channel(desc);
5792 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
5793 : 4 : bdev_ch = spdk_io_channel_get_ctx(ioch);
5794 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
5795 : :
5796 : 4 : fn_table.submit_request = stub_submit_request;
5797 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
5798 : :
5799 : : /* Case 1: First test the request won't be split */
5800 : 4 : num_blocks = 32;
5801 : :
5802 : 4 : g_io_done = false;
5803 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
5804 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5805 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5806 : 4 : CU_ASSERT_EQUAL(rc, 0);
5807 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5808 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5809 : 4 : stub_complete_io(1);
5810 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5811 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5812 : :
5813 : : /* Case 2: Test the split with 2 children requests */
5814 : 4 : max_write_zeroes_blocks = 8;
5815 : 4 : bdev->max_write_zeroes = max_write_zeroes_blocks;
5816 : 4 : num_blocks = max_write_zeroes_blocks * 2;
5817 : 4 : offset = 0;
5818 : :
5819 : 4 : g_io_done = false;
5820 [ + + ]: 12 : for (i = 0; i < 2; i++) {
5821 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5822 : : 0);
5823 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5824 : 8 : offset += max_write_zeroes_blocks;
5825 : 2 : }
5826 : :
5827 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5828 : 4 : CU_ASSERT_EQUAL(rc, 0);
5829 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5830 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
5831 : 4 : stub_complete_io(2);
5832 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5833 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
5834 : :
5835 : : /* Case 3: Test the split with 15 children requests, will finish 8 requests first */
5836 : 4 : num_children = 15;
5837 : 4 : num_blocks = max_write_zeroes_blocks * num_children;
5838 : 4 : g_io_done = false;
5839 : 4 : offset = 0;
5840 [ + + ]: 64 : for (i = 0; i < num_children; i++) {
5841 : 60 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
5842 : : 0);
5843 : 60 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5844 : 60 : offset += max_write_zeroes_blocks;
5845 : 15 : }
5846 : :
5847 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
5848 : 4 : CU_ASSERT_EQUAL(rc, 0);
5849 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5850 : :
5851 [ + + ]: 12 : while (num_children > 0) {
5852 [ + + ]: 8 : num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
5853 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
5854 : 8 : stub_complete_io(num_outstanding);
5855 : 8 : num_children -= num_outstanding;
5856 : : }
5857 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5858 : :
5859 : 4 : spdk_put_io_channel(ioch);
5860 : 4 : spdk_bdev_close(desc);
5861 : 4 : free_bdev(bdev);
5862 : 4 : ut_fini_bdev();
5863 : 4 : }
5864 : :
5865 : : static void
5866 : 4 : bdev_set_options_test(void)
5867 : : {
5868 : 4 : struct spdk_bdev_opts bdev_opts = {};
5869 : : int rc;
5870 : :
5871 : : /* Case1: Do not set opts_size */
5872 : 4 : rc = spdk_bdev_set_opts(&bdev_opts);
5873 : 4 : CU_ASSERT(rc == -1);
5874 : 4 : }
5875 : :
5876 : : static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d;
5877 : :
5878 : : static int
5879 : 12 : test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
5880 : : int array_size)
5881 : : {
5882 [ + + + + ]: 12 : if (array_size > 0 && domains) {
5883 : 4 : domains[0] = g_bdev_memory_domain;
5884 : 1 : }
5885 : :
5886 : 12 : return 1;
5887 : : }
5888 : :
5889 : : static void
5890 : 4 : bdev_get_memory_domains(void)
5891 : : {
5892 : 4 : struct spdk_bdev_fn_table fn_table = {
5893 : : .get_memory_domains = test_bdev_get_supported_dma_device_types_op
5894 : : };
5895 : 4 : struct spdk_bdev bdev = { .fn_table = &fn_table };
5896 : 4 : struct spdk_memory_domain *domains[2] = {};
5897 : : int rc;
5898 : :
5899 : : /* bdev is NULL */
5900 : 4 : rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
5901 : 4 : CU_ASSERT(rc == -EINVAL);
5902 : :
5903 : : /* domains is NULL */
5904 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
5905 : 4 : CU_ASSERT(rc == 1);
5906 : :
5907 : : /* array size is 0 */
5908 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
5909 : 4 : CU_ASSERT(rc == 1);
5910 : :
5911 : : /* get_supported_dma_device_types op is set */
5912 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5913 : 4 : CU_ASSERT(rc == 1);
5914 : 4 : CU_ASSERT(domains[0] == g_bdev_memory_domain);
5915 : :
5916 : : /* get_supported_dma_device_types op is not set */
5917 : 4 : fn_table.get_memory_domains = NULL;
5918 : 4 : rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
5919 : 4 : CU_ASSERT(rc == 0);
5920 : 4 : }
5921 : :
5922 : : static void
5923 : 8 : _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
5924 : : {
5925 : : struct spdk_bdev *bdev;
5926 : 8 : struct spdk_bdev_desc *desc = NULL;
5927 : : struct spdk_io_channel *io_ch;
5928 : 6 : char io_buf[512];
5929 : 8 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
5930 : : struct ut_expected_io *expected_io;
5931 : : int rc;
5932 : :
5933 : 8 : ut_init_bdev(NULL);
5934 : :
5935 : 8 : bdev = allocate_bdev("bdev0");
5936 : 8 : bdev->md_interleave = false;
5937 : 8 : bdev->md_len = 8;
5938 : :
5939 : 8 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5940 : 8 : CU_ASSERT(rc == 0);
5941 [ + + ]: 8 : SPDK_CU_ASSERT_FATAL(desc != NULL);
5942 : 8 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5943 : 8 : io_ch = spdk_bdev_get_io_channel(desc);
5944 : 8 : CU_ASSERT(io_ch != NULL);
5945 : :
5946 : : /* read */
5947 : 8 : g_io_done = false;
5948 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
5949 [ + + ]: 8 : if (ext_io_opts) {
5950 : 4 : expected_io->md_buf = ext_io_opts->metadata;
5951 : 1 : }
5952 : 8 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5953 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5954 : :
5955 : 8 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);
5956 : :
5957 : 8 : CU_ASSERT(rc == 0);
5958 [ - + ]: 8 : CU_ASSERT(g_io_done == false);
5959 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5960 : 8 : stub_complete_io(1);
5961 [ - + ]: 8 : CU_ASSERT(g_io_done == true);
5962 : :
5963 : : /* write */
5964 : 8 : g_io_done = false;
5965 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
5966 [ + + ]: 8 : if (ext_io_opts) {
5967 : 4 : expected_io->md_buf = ext_io_opts->metadata;
5968 : 1 : }
5969 : 8 : ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
5970 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
5971 : :
5972 : 8 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);
5973 : :
5974 : 8 : CU_ASSERT(rc == 0);
5975 [ - + ]: 8 : CU_ASSERT(g_io_done == false);
5976 : 8 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
5977 : 8 : stub_complete_io(1);
5978 [ - + ]: 8 : CU_ASSERT(g_io_done == true);
5979 : :
5980 : 8 : spdk_put_io_channel(io_ch);
5981 : 8 : spdk_bdev_close(desc);
5982 : 8 : free_bdev(bdev);
5983 : 8 : ut_fini_bdev();
5984 : :
5985 : 8 : }
5986 : :
5987 : : static void
5988 : 4 : bdev_io_ext(void)
5989 : : {
5990 : 4 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5991 : : .metadata = (void *)0xFF000000,
5992 : : .size = sizeof(ext_io_opts),
5993 : : .dif_check_flags_exclude_mask = 0
5994 : : };
5995 : :
5996 : 4 : _bdev_io_ext(&ext_io_opts);
5997 : 4 : }
5998 : :
5999 : : static void
6000 : 4 : bdev_io_ext_no_opts(void)
6001 : : {
6002 : 4 : _bdev_io_ext(NULL);
6003 : 4 : }
6004 : :
6005 : : static void
6006 : 4 : bdev_io_ext_invalid_opts(void)
6007 : : {
6008 : : struct spdk_bdev *bdev;
6009 : 4 : struct spdk_bdev_desc *desc = NULL;
6010 : : struct spdk_io_channel *io_ch;
6011 : 3 : char io_buf[512];
6012 : 4 : struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
6013 : 4 : struct spdk_bdev_ext_io_opts ext_io_opts = {
6014 : : .metadata = (void *)0xFF000000,
6015 : : .size = sizeof(ext_io_opts),
6016 : : .dif_check_flags_exclude_mask = 0
6017 : : };
6018 : : int rc;
6019 : :
6020 : 4 : ut_init_bdev(NULL);
6021 : :
6022 : 4 : bdev = allocate_bdev("bdev0");
6023 : 4 : bdev->md_interleave = false;
6024 : 4 : bdev->md_len = 8;
6025 : :
6026 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6027 : 4 : CU_ASSERT(rc == 0);
6028 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6029 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6030 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
6031 : 4 : CU_ASSERT(io_ch != NULL);
6032 : :
6033 : : /* Test invalid ext_opts size */
6034 : 4 : ext_io_opts.size = 0;
6035 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6036 : 4 : CU_ASSERT(rc == -EINVAL);
6037 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6038 : 4 : CU_ASSERT(rc == -EINVAL);
6039 : :
6040 : 4 : ext_io_opts.size = sizeof(ext_io_opts) * 2;
6041 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6042 : 4 : CU_ASSERT(rc == -EINVAL);
6043 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6044 : 4 : CU_ASSERT(rc == -EINVAL);
6045 : :
6046 : 4 : ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
6047 : : sizeof(ext_io_opts.metadata) - 1;
6048 : 4 : rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6049 : 4 : CU_ASSERT(rc == -EINVAL);
6050 : 4 : rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
6051 : 4 : CU_ASSERT(rc == -EINVAL);
6052 : :
6053 : 4 : spdk_put_io_channel(io_ch);
6054 : 4 : spdk_bdev_close(desc);
6055 : 4 : free_bdev(bdev);
6056 : 4 : ut_fini_bdev();
6057 : 4 : }
6058 : :
/* Verify that an extended (ext_opts) I/O carrying a separate metadata buffer
 * is split on the optimal I/O boundary and that each child I/O receives the
 * correct slice of both the data payload and the metadata buffer. */
static void
bdev_io_ext_split(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts),
		.dif_check_flags_exclude_mask = 0
	};
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev0");
	/* Separate (non-interleaved) metadata, 8 bytes per block */
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Check that IO request with ext_opts and metadata is split correctly
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	iov.iov_base = (void *)0xF000;
	iov.iov_len = 4096;
	memset(&ext_io_opts, 0, sizeof(ext_io_opts));
	ext_io_opts.metadata = (void *)0xFF000000;
	ext_io_opts.size = sizeof(ext_io_opts);
	g_io_done = false;

	/* read */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Second child: metadata pointer advances by 2 blocks * 8 bytes md_len */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* Both children must be outstanding before any completion */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* write - same split expectations as the read above */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6151 : :
/* Verify bounce-buffer handling for extended I/O with a memory domain on a
 * bdev that does not support memory domains: data is pulled from the domain
 * before a write and pushed back after a read, and -ENOMEM from pull/push
 * queues the request for retry instead of failing it. */
static void
bdev_io_ext_bounce_buffer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io, *aux_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts),
		.dif_check_flags_exclude_mask = 0
	};
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Verify data pull/push
	 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	/* Read completion must push the bounce buffer back into the memory domain */
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	/* Write submission must pull data from the memory domain before issuing */
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Verify the request is queued after receiving ENOMEM from pull */
	g_io_done = false;
	/* First submit a plain (non-ext) write to occupy the channel */
	aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
	rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Make the pull fail so the ext write cannot be submitted yet */
	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	/* The second IO has been queued */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	MOCK_CLEAR(spdk_memory_domain_pull_data);
	g_memory_domain_pull_data_called = false;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	/* Retry after the first IO completes performs the pull successfully */
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	/* The second IO should be submitted now */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	g_io_done = false;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Verify the request is queued after receiving ENOMEM from push */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	MOCK_SET(spdk_memory_domain_push_data, -ENOMEM);
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
	rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	stub_complete_io(1);
	/* The IO isn't done yet, it's still waiting on push */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	MOCK_CLEAR(spdk_memory_domain_push_data);
	g_memory_domain_push_data_called = false;
	/* Completing the second IO should also trigger push on the first one */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6283 : :
6284 : : static void
6285 : 4 : bdev_register_uuid_alias(void)
6286 : : {
6287 : : struct spdk_bdev *bdev, *second;
6288 : 3 : char uuid[SPDK_UUID_STRING_LEN];
6289 : : int rc;
6290 : :
6291 : 4 : ut_init_bdev(NULL);
6292 : 4 : bdev = allocate_bdev("bdev0");
6293 : :
6294 : : /* Make sure an UUID was generated */
6295 : 4 : CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid));
6296 : :
6297 : : /* Check that an UUID alias was registered */
6298 : 4 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
6299 : 4 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6300 : :
6301 : : /* Unregister the bdev */
6302 : 4 : spdk_bdev_unregister(bdev, NULL, NULL);
6303 : 4 : poll_threads();
6304 : 4 : CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
6305 : :
6306 : : /* Check the same, but this time register the bdev with non-zero UUID */
6307 : 4 : rc = spdk_bdev_register(bdev);
6308 : 4 : CU_ASSERT_EQUAL(rc, 0);
6309 : 4 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6310 : :
6311 : : /* Unregister the bdev */
6312 : 4 : spdk_bdev_unregister(bdev, NULL, NULL);
6313 : 4 : poll_threads();
6314 : 4 : CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
6315 : :
6316 : : /* Register the bdev using UUID as the name */
6317 : 4 : bdev->name = uuid;
6318 : 4 : rc = spdk_bdev_register(bdev);
6319 : 4 : CU_ASSERT_EQUAL(rc, 0);
6320 : 4 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6321 : :
6322 : : /* Unregister the bdev */
6323 : 4 : spdk_bdev_unregister(bdev, NULL, NULL);
6324 : 4 : poll_threads();
6325 : 4 : CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
6326 : :
6327 : : /* Check that it's not possible to register two bdevs with the same UUIDs */
6328 : 4 : bdev->name = "bdev0";
6329 : 4 : second = allocate_bdev("bdev1");
6330 : 4 : spdk_uuid_copy(&bdev->uuid, &second->uuid);
6331 : 4 : rc = spdk_bdev_register(bdev);
6332 : 4 : CU_ASSERT_EQUAL(rc, -EEXIST);
6333 : :
6334 : : /* Regenerate the UUID and re-check */
6335 : 4 : spdk_uuid_generate(&bdev->uuid);
6336 : 4 : rc = spdk_bdev_register(bdev);
6337 : 4 : CU_ASSERT_EQUAL(rc, 0);
6338 : :
6339 : : /* And check that both bdevs can be retrieved through their UUIDs */
6340 : 4 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
6341 : 4 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
6342 : 4 : spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
6343 : 4 : CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);
6344 : :
6345 : 4 : free_bdev(second);
6346 : 4 : free_bdev(bdev);
6347 : 4 : ut_fini_bdev();
6348 : 4 : }
6349 : :
6350 : : static void
6351 : 4 : bdev_unregister_by_name(void)
6352 : : {
6353 : : struct spdk_bdev *bdev;
6354 : : int rc;
6355 : :
6356 : 4 : bdev = allocate_bdev("bdev");
6357 : :
6358 : 4 : g_event_type1 = 0xFF;
6359 : 4 : g_unregister_arg = NULL;
6360 : 4 : g_unregister_rc = -1;
6361 : :
6362 : 4 : rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
6363 : 4 : CU_ASSERT(rc == -ENODEV);
6364 : :
6365 : 4 : rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
6366 : 4 : CU_ASSERT(rc == -ENODEV);
6367 : :
6368 : 4 : rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
6369 : 4 : CU_ASSERT(rc == 0);
6370 : :
6371 : : /* Check that unregister callback is delayed */
6372 : 4 : CU_ASSERT(g_unregister_arg == NULL);
6373 : 4 : CU_ASSERT(g_unregister_rc == -1);
6374 : :
6375 : 4 : poll_threads();
6376 : :
6377 : : /* Event callback shall not be issued because device was closed */
6378 : 4 : CU_ASSERT(g_event_type1 == 0xFF);
6379 : : /* Unregister callback is issued */
6380 : 4 : CU_ASSERT(g_unregister_arg == (void *)0x12345678);
6381 : 4 : CU_ASSERT(g_unregister_rc == 0);
6382 : :
6383 : 4 : free_bdev(bdev);
6384 : 4 : }
6385 : :
/* spdk_for_each_bdev() callback: increments the int counter passed via ctx;
 * always returns 0 so iteration continues over every bdev. */
static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *counter = ctx;

	*counter += 1;

	return 0;
}
6395 : :
6396 : : static void
6397 : 4 : for_each_bdev_test(void)
6398 : : {
6399 : : struct spdk_bdev *bdev[8];
6400 : 3 : int rc, count;
6401 : :
6402 : 4 : bdev[0] = allocate_bdev("bdev0");
6403 : 4 : bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING;
6404 : :
6405 : 4 : bdev[1] = allocate_bdev("bdev1");
6406 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
6407 : 4 : CU_ASSERT(rc == 0);
6408 : :
6409 : 4 : bdev[2] = allocate_bdev("bdev2");
6410 : :
6411 : 4 : bdev[3] = allocate_bdev("bdev3");
6412 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
6413 : 4 : CU_ASSERT(rc == 0);
6414 : :
6415 : 4 : bdev[4] = allocate_bdev("bdev4");
6416 : :
6417 : 4 : bdev[5] = allocate_bdev("bdev5");
6418 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
6419 : 4 : CU_ASSERT(rc == 0);
6420 : :
6421 : 4 : bdev[6] = allocate_bdev("bdev6");
6422 : :
6423 : 4 : bdev[7] = allocate_bdev("bdev7");
6424 : :
6425 : 4 : count = 0;
6426 : 4 : rc = spdk_for_each_bdev(&count, count_bdevs);
6427 : 4 : CU_ASSERT(rc == 0);
6428 : 4 : CU_ASSERT(count == 7);
6429 : :
6430 : 4 : count = 0;
6431 : 4 : rc = spdk_for_each_bdev_leaf(&count, count_bdevs);
6432 : 4 : CU_ASSERT(rc == 0);
6433 : 4 : CU_ASSERT(count == 4);
6434 : :
6435 : 4 : bdev[0]->internal.status = SPDK_BDEV_STATUS_READY;
6436 : 4 : free_bdev(bdev[0]);
6437 : 4 : free_bdev(bdev[1]);
6438 : 4 : free_bdev(bdev[2]);
6439 : 4 : free_bdev(bdev[3]);
6440 : 4 : free_bdev(bdev[4]);
6441 : 4 : free_bdev(bdev[5]);
6442 : 4 : free_bdev(bdev[6]);
6443 : 4 : free_bdev(bdev[7]);
6444 : 4 : }
6445 : :
6446 : : static void
6447 : 4 : bdev_seek_test(void)
6448 : : {
6449 : : struct spdk_bdev *bdev;
6450 : 4 : struct spdk_bdev_desc *desc = NULL;
6451 : : struct spdk_io_channel *io_ch;
6452 : : int rc;
6453 : :
6454 : 4 : ut_init_bdev(NULL);
6455 : 4 : poll_threads();
6456 : :
6457 : 4 : bdev = allocate_bdev("bdev0");
6458 : :
6459 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6460 : 4 : CU_ASSERT(rc == 0);
6461 : 4 : poll_threads();
6462 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6463 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6464 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
6465 : 4 : CU_ASSERT(io_ch != NULL);
6466 : :
6467 : : /* Seek data not supported */
6468 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false);
6469 : 4 : rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
6470 : 4 : CU_ASSERT(rc == 0);
6471 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6472 : 4 : poll_threads();
6473 : 4 : CU_ASSERT(g_seek_offset == 0);
6474 : :
6475 : : /* Seek hole not supported */
6476 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false);
6477 : 4 : rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
6478 : 4 : CU_ASSERT(rc == 0);
6479 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6480 : 4 : poll_threads();
6481 : 4 : CU_ASSERT(g_seek_offset == UINT64_MAX);
6482 : :
6483 : : /* Seek data supported */
6484 : 4 : g_seek_data_offset = 12345;
6485 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true);
6486 : 4 : rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
6487 : 4 : CU_ASSERT(rc == 0);
6488 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6489 : 4 : stub_complete_io(1);
6490 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6491 : 4 : CU_ASSERT(g_seek_offset == 12345);
6492 : :
6493 : : /* Seek hole supported */
6494 : 4 : g_seek_hole_offset = 67890;
6495 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true);
6496 : 4 : rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
6497 : 4 : CU_ASSERT(rc == 0);
6498 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
6499 : 4 : stub_complete_io(1);
6500 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
6501 : 4 : CU_ASSERT(g_seek_offset == 67890);
6502 : :
6503 : 4 : spdk_put_io_channel(io_ch);
6504 : 4 : spdk_bdev_close(desc);
6505 : 4 : free_bdev(bdev);
6506 : 4 : ut_fini_bdev();
6507 : 4 : }
6508 : :
6509 : : static void
6510 : 4 : bdev_copy(void)
6511 : : {
6512 : : struct spdk_bdev *bdev;
6513 : 4 : struct spdk_bdev_desc *desc = NULL;
6514 : : struct spdk_io_channel *ioch;
6515 : : struct ut_expected_io *expected_io;
6516 : : uint64_t src_offset, num_blocks;
6517 : : uint32_t num_completed;
6518 : : int rc;
6519 : :
6520 : 4 : ut_init_bdev(NULL);
6521 : 4 : bdev = allocate_bdev("bdev");
6522 : :
6523 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
6524 : 4 : CU_ASSERT_EQUAL(rc, 0);
6525 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6526 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
6527 : 4 : ioch = spdk_bdev_get_io_channel(desc);
6528 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
6529 : :
6530 : 4 : fn_table.submit_request = stub_submit_request;
6531 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
6532 : :
6533 : : /* First test that if the bdev supports copy, the request won't be split */
6534 : 4 : bdev->md_len = 0;
6535 : 4 : bdev->blocklen = 512;
6536 : 4 : num_blocks = 128;
6537 : 4 : src_offset = bdev->blockcnt - num_blocks;
6538 : :
6539 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
6540 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6541 : :
6542 : 4 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6543 : 4 : CU_ASSERT_EQUAL(rc, 0);
6544 : 4 : num_completed = stub_complete_io(1);
6545 : 4 : CU_ASSERT_EQUAL(num_completed, 1);
6546 : :
6547 : : /* Check that if copy is not supported it'll still work */
6548 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
6549 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6550 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
6551 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
6552 : :
6553 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
6554 : :
6555 : 4 : rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
6556 : 4 : CU_ASSERT_EQUAL(rc, 0);
6557 : 4 : num_completed = stub_complete_io(1);
6558 : 4 : CU_ASSERT_EQUAL(num_completed, 1);
6559 : 4 : num_completed = stub_complete_io(1);
6560 : 4 : CU_ASSERT_EQUAL(num_completed, 1);
6561 : :
6562 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
6563 : 4 : spdk_put_io_channel(ioch);
6564 : 4 : spdk_bdev_close(desc);
6565 : 4 : free_bdev(bdev);
6566 : 4 : ut_fini_bdev();
6567 : 4 : }
6568 : :
/* Verify splitting of spdk_bdev_copy_blocks() requests against the bdev's
 * max_copy limit, including the SPDK_BDEV_MAX_CHILDREN_COPY_REQS cap on
 * concurrently outstanding children and READ/WRITE emulation when the COPY
 * IO type is unsupported. */
static void
bdev_copy_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
	int rc;

	/* Enlarge the IO pool so case 3 can keep many child IOs in flight */
	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	max_copy_blocks = 8;
	bdev->max_copy = max_copy_blocks;
	num_children = 2;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	/* Each child covers max_copy_blocks, with src/dst offsets advancing in lockstep */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
	stub_complete_io(num_children);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	/* Children are capped at SPDK_BDEV_MAX_CHILDREN_COPY_REQS in flight at once */
	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	/* Case 4: Same test scenario as the case 2 but the configuration is different.
	 * Copy is not supported.
	 */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	num_children = 2;
	max_copy_blocks = spdk_bdev_get_max_copy(bdev);
	num_blocks = max_copy_blocks * num_children;
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	g_io_done = false;
	/* Each emulated child appears as a READ from the source range... */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		src_offset += max_copy_blocks;
	}
	/* ...followed by a WRITE to the destination range */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	/* Reset the offsets consumed by the expectation loops above */
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);

		/* One copy request is split into one read and one write requests. */
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);

		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6720 : :
6721 : : static void
6722 : 4 : examine_claim_v1(struct spdk_bdev *bdev)
6723 : : {
6724 : : int rc;
6725 : :
6726 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
6727 : 4 : CU_ASSERT(rc == 0);
6728 : 4 : }
6729 : :
/* Examine callback asserting that neither the global bdev manager spinlock
 * nor the per-bdev spinlock is held while examine callbacks run. */
static void
examine_no_lock_held(struct spdk_bdev *bdev)
{
	CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
	CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
}
6736 : :
/* Context attached as bdev->ctxt for examine_claim_v2(): carries the claim
 * type to request and the descriptor through which the claim is held. */
struct examine_claim_v2_ctx {
	/* NOTE(review): placed first, presumably so the context can also be
	 * used where a plain ut_examine_ctx is expected — confirm against
	 * allocate_bdev_ctx() */
	struct ut_examine_ctx examine_ctx;
	/* Claim type examine_claim_v2() will request */
	enum spdk_bdev_claim_type claim_type;
	/* Descriptor opened by examine_claim_v2(); closing it drops the claim */
	struct spdk_bdev_desc *desc;
};
6742 : :
6743 : : static void
6744 : 4 : examine_claim_v2(struct spdk_bdev *bdev)
6745 : : {
6746 : 4 : struct examine_claim_v2_ctx *ctx = bdev->ctxt;
6747 : : int rc;
6748 : :
6749 : 4 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
6750 : 4 : CU_ASSERT(rc == 0);
6751 : :
6752 : 4 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
6753 : 4 : CU_ASSERT(rc == 0);
6754 : 4 : }
6755 : :
6756 : : static void
6757 : 4 : examine_locks(void)
6758 : : {
6759 : : struct spdk_bdev *bdev;
6760 : 4 : struct ut_examine_ctx ctx = { 0 };
6761 : 3 : struct examine_claim_v2_ctx v2_ctx;
6762 : :
6763 : : /* Without any claims, one code path is taken */
6764 : 4 : ctx.examine_config = examine_no_lock_held;
6765 : 4 : ctx.examine_disk = examine_no_lock_held;
6766 : 4 : bdev = allocate_bdev_ctx("bdev0", &ctx);
6767 : 4 : CU_ASSERT(ctx.examine_config_count == 1);
6768 : 4 : CU_ASSERT(ctx.examine_disk_count == 1);
6769 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6770 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == NULL);
6771 : 4 : free_bdev(bdev);
6772 : :
6773 : : /* Exercise another path that is taken when examine_config() takes a v1 claim. */
6774 [ - + ]: 4 : memset(&ctx, 0, sizeof(ctx));
6775 : 4 : ctx.examine_config = examine_claim_v1;
6776 : 4 : ctx.examine_disk = examine_no_lock_held;
6777 : 4 : bdev = allocate_bdev_ctx("bdev0", &ctx);
6778 : 4 : CU_ASSERT(ctx.examine_config_count == 1);
6779 : 4 : CU_ASSERT(ctx.examine_disk_count == 1);
6780 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
6781 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
6782 : 4 : spdk_bdev_module_release_bdev(bdev);
6783 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6784 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == NULL);
6785 : 4 : free_bdev(bdev);
6786 : :
6787 : : /* Exercise the final path that comes with v2 claims. */
6788 [ - + ]: 4 : memset(&v2_ctx, 0, sizeof(v2_ctx));
6789 : 4 : v2_ctx.examine_ctx.examine_config = examine_claim_v2;
6790 : 4 : v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
6791 : 4 : v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
6792 : 4 : bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
6793 : 4 : CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
6794 : 4 : CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
6795 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6796 : 4 : spdk_bdev_close(v2_ctx.desc);
6797 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6798 : 4 : free_bdev(bdev);
6799 : 4 : }
6800 : :
/* Walk the bdev's v2 claim list and assert it holds exactly 'expect'
 * entries.  Wrapped in do { } while (0) so it behaves as one statement. */
#define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
	do { \
		uint32_t len = 0; \
		struct spdk_bdev_module_claim *claim; \
		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \
			len++; \
		} \
		CU_ASSERT(len == expect); \
	} while (0)
6810 : :
6811 : : static void
6812 : 4 : claim_v2_rwo(void)
6813 : : {
6814 : : struct spdk_bdev *bdev;
6815 : 3 : struct spdk_bdev_desc *desc;
6816 : 3 : struct spdk_bdev_desc *desc2;
6817 : 3 : struct spdk_bdev_claim_opts opts;
6818 : : int rc;
6819 : :
6820 : 4 : bdev = allocate_bdev("bdev0");
6821 : :
6822 : : /* Claim without options */
6823 : 4 : desc = NULL;
6824 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6825 : 4 : CU_ASSERT(rc == 0);
6826 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6827 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6828 : : &bdev_ut_if);
6829 : 4 : CU_ASSERT(rc == 0);
6830 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
6831 : 4 : CU_ASSERT(desc->claim != NULL);
6832 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6833 : 4 : CU_ASSERT(strcmp(desc->claim->name, "") == 0);
6834 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6835 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6836 : :
6837 : : /* Release the claim by closing the descriptor */
6838 : 4 : spdk_bdev_close(desc);
6839 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6840 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6841 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6842 : :
6843 : : /* Claim with options */
6844 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6845 : 4 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6846 : 4 : desc = NULL;
6847 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
6848 : 4 : CU_ASSERT(rc == 0);
6849 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6850 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
6851 : : &bdev_ut_if);
6852 : 4 : CU_ASSERT(rc == 0);
6853 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
6854 : 4 : CU_ASSERT(desc->claim != NULL);
6855 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6856 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6857 : 4 : memset(&opts, 0, sizeof(opts));
6858 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6859 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6860 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6861 : :
6862 : : /* The claim blocks new writers. */
6863 : 4 : desc2 = NULL;
6864 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
6865 : 4 : CU_ASSERT(rc == -EPERM);
6866 : 4 : CU_ASSERT(desc2 == NULL);
6867 : :
6868 : : /* New readers are allowed */
6869 : 4 : desc2 = NULL;
6870 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
6871 : 4 : CU_ASSERT(rc == 0);
6872 : 4 : CU_ASSERT(desc2 != NULL);
6873 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6874 : :
6875 : : /* No new v2 RWO claims are allowed */
6876 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6877 : : &bdev_ut_if);
6878 : 4 : CU_ASSERT(rc == -EPERM);
6879 : :
6880 : : /* No new v2 ROM claims are allowed */
6881 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6882 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6883 : : &bdev_ut_if);
6884 : 4 : CU_ASSERT(rc == -EPERM);
6885 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6886 : :
6887 : : /* No new v2 RWM claims are allowed */
6888 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6889 : 4 : opts.shared_claim_key = (uint64_t)&opts;
6890 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
6891 : : &bdev_ut_if);
6892 : 4 : CU_ASSERT(rc == -EPERM);
6893 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6894 : :
6895 : : /* No new v1 claims are allowed */
6896 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
6897 : 4 : CU_ASSERT(rc == -EPERM);
6898 : :
6899 : : /* None of the above changed the existing claim */
6900 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6901 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6902 : :
6903 : : /* Closing the first descriptor now allows a new claim and it is promoted to rw. */
6904 : 4 : spdk_bdev_close(desc);
6905 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6906 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6907 [ - + ]: 4 : CU_ASSERT(!desc2->write);
6908 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
6909 : : &bdev_ut_if);
6910 : 4 : CU_ASSERT(rc == 0);
6911 : 4 : CU_ASSERT(desc2->claim != NULL);
6912 [ - + ]: 4 : CU_ASSERT(desc2->write);
6913 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
6914 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
6915 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6916 : 4 : spdk_bdev_close(desc2);
6917 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6918 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6919 : :
6920 : : /* Cannot claim with a key */
6921 : 4 : desc = NULL;
6922 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6923 : 4 : CU_ASSERT(rc == 0);
6924 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6925 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6926 : 4 : opts.shared_claim_key = (uint64_t)&opts;
6927 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
6928 : : &bdev_ut_if);
6929 : 4 : CU_ASSERT(rc == -EINVAL);
6930 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6931 [ + + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6932 : 4 : spdk_bdev_close(desc);
6933 : :
6934 : : /* Clean up */
6935 : 4 : free_bdev(bdev);
6936 : 4 : }
6937 : :
6938 : : static void
6939 : 4 : claim_v2_rom(void)
6940 : : {
6941 : : struct spdk_bdev *bdev;
6942 : 3 : struct spdk_bdev_desc *desc;
6943 : 3 : struct spdk_bdev_desc *desc2;
6944 : 3 : struct spdk_bdev_claim_opts opts;
6945 : : int rc;
6946 : :
6947 : 4 : bdev = allocate_bdev("bdev0");
6948 : :
6949 : : /* Claim without options */
6950 : 4 : desc = NULL;
6951 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6952 : 4 : CU_ASSERT(rc == 0);
6953 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6954 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
6955 : : &bdev_ut_if);
6956 : 4 : CU_ASSERT(rc == 0);
6957 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6958 : 4 : CU_ASSERT(desc->claim != NULL);
6959 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6960 : 4 : CU_ASSERT(strcmp(desc->claim->name, "") == 0);
6961 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6962 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6963 : :
6964 : : /* Release the claim by closing the descriptor */
6965 : 4 : spdk_bdev_close(desc);
6966 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6967 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
6968 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
6969 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
6970 : :
6971 : : /* Claim with options */
6972 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
6973 : 4 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
6974 : 4 : desc = NULL;
6975 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
6976 : 4 : CU_ASSERT(rc == 0);
6977 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
6978 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
6979 : : &bdev_ut_if);
6980 : 4 : CU_ASSERT(rc == 0);
6981 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
6982 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
6983 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
6984 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6985 : 4 : memset(&opts, 0, sizeof(opts));
6986 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
6987 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
6988 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
6989 : :
6990 : : /* The claim blocks new writers. */
6991 : 4 : desc2 = NULL;
6992 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
6993 : 4 : CU_ASSERT(rc == -EPERM);
6994 : 4 : CU_ASSERT(desc2 == NULL);
6995 : :
6996 : : /* New readers are allowed */
6997 : 4 : desc2 = NULL;
6998 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
6999 : 4 : CU_ASSERT(rc == 0);
7000 : 4 : CU_ASSERT(desc2 != NULL);
7001 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7002 : :
7003 : : /* No new v2 RWO claims are allowed */
7004 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
7005 : : &bdev_ut_if);
7006 : 4 : CU_ASSERT(rc == -EPERM);
7007 : :
7008 : : /* No new v2 RWM claims are allowed */
7009 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7010 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7011 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7012 : : &bdev_ut_if);
7013 : 4 : CU_ASSERT(rc == -EPERM);
7014 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7015 : :
7016 : : /* No new v1 claims are allowed */
7017 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7018 : 4 : CU_ASSERT(rc == -EPERM);
7019 : :
7020 : : /* None of the above messed up the existing claim */
7021 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
7022 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7023 : :
7024 : : /* New v2 ROM claims are allowed and the descriptor stays read-only. */
7025 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7026 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
7027 : : &bdev_ut_if);
7028 : 4 : CU_ASSERT(rc == 0);
7029 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7030 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
7031 : 4 : CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
7032 [ + + ]: 12 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);
7033 : :
7034 : : /* Claim remains when closing the first descriptor */
7035 : 4 : spdk_bdev_close(desc);
7036 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
7037 : 4 : CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
7038 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
7039 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7040 : :
7041 : : /* Claim removed when closing the other descriptor */
7042 : 4 : spdk_bdev_close(desc2);
7043 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7044 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
7045 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7046 : :
7047 : : /* Cannot claim with a key */
7048 : 4 : desc = NULL;
7049 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7050 : 4 : CU_ASSERT(rc == 0);
7051 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7052 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7053 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7054 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
7055 : : &bdev_ut_if);
7056 : 4 : CU_ASSERT(rc == -EINVAL);
7057 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7058 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
7059 : 4 : spdk_bdev_close(desc);
7060 : :
7061 : : /* Cannot claim with a read-write descriptor */
7062 : 4 : desc = NULL;
7063 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7064 : 4 : CU_ASSERT(rc == 0);
7065 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7066 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
7067 : : &bdev_ut_if);
7068 : 4 : CU_ASSERT(rc == -EINVAL);
7069 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7070 [ + + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
7071 : 4 : spdk_bdev_close(desc);
7072 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7073 : :
7074 : : /* Clean up */
7075 : 4 : free_bdev(bdev);
7076 : 4 : }
7077 : :
7078 : : static void
7079 : 4 : claim_v2_rwm(void)
7080 : : {
7081 : : struct spdk_bdev *bdev;
7082 : 3 : struct spdk_bdev_desc *desc;
7083 : 3 : struct spdk_bdev_desc *desc2;
7084 : 3 : struct spdk_bdev_claim_opts opts;
7085 : 3 : char good_key, bad_key;
7086 : : int rc;
7087 : :
7088 : 4 : bdev = allocate_bdev("bdev0");
7089 : :
7090 : : /* Claim without options should fail */
7091 : 4 : desc = NULL;
7092 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7093 : 4 : CU_ASSERT(rc == 0);
7094 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7095 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
7096 : : &bdev_ut_if);
7097 : 4 : CU_ASSERT(rc == -EINVAL);
7098 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7099 [ - + ]: 4 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
7100 : 4 : CU_ASSERT(desc->claim == NULL);
7101 : :
7102 : : /* Claim with options */
7103 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7104 : 4 : snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
7105 : 4 : opts.shared_claim_key = (uint64_t)&good_key;
7106 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7107 : : &bdev_ut_if);
7108 : 4 : CU_ASSERT(rc == 0);
7109 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
7110 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
7111 : 4 : CU_ASSERT(desc->claim->module == &bdev_ut_if);
7112 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
7113 : 4 : memset(&opts, 0, sizeof(opts));
7114 [ - + ]: 4 : CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
7115 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
7116 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7117 : :
7118 : : /* The claim blocks new writers. */
7119 : 4 : desc2 = NULL;
7120 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
7121 : 4 : CU_ASSERT(rc == -EPERM);
7122 : 4 : CU_ASSERT(desc2 == NULL);
7123 : :
7124 : : /* New readers are allowed */
7125 : 4 : desc2 = NULL;
7126 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
7127 : 4 : CU_ASSERT(rc == 0);
7128 : 4 : CU_ASSERT(desc2 != NULL);
7129 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7130 : :
7131 : : /* No new v2 RWO claims are allowed */
7132 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
7133 : : &bdev_ut_if);
7134 : 4 : CU_ASSERT(rc == -EPERM);
7135 : :
7136 : : /* No new v2 ROM claims are allowed and the descriptor stays read-only. */
7137 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7138 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
7139 : : &bdev_ut_if);
7140 : 4 : CU_ASSERT(rc == -EPERM);
7141 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7142 : :
7143 : : /* No new v1 claims are allowed */
7144 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7145 : 4 : CU_ASSERT(rc == -EPERM);
7146 : :
7147 : : /* No new v2 RWM claims are allowed if the key does not match */
7148 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7149 : 4 : opts.shared_claim_key = (uint64_t)&bad_key;
7150 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7151 : : &bdev_ut_if);
7152 : 4 : CU_ASSERT(rc == -EPERM);
7153 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7154 : :
7155 : : /* None of the above messed up the existing claim */
7156 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
7157 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7158 : :
7159 : : /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
7160 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7161 : 4 : opts.shared_claim_key = (uint64_t)&good_key;
7162 [ - + ]: 4 : CU_ASSERT(!desc2->write);
7163 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7164 : : &bdev_ut_if);
7165 : 4 : CU_ASSERT(rc == 0);
7166 [ - + ]: 4 : CU_ASSERT(desc2->write);
7167 : 4 : CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
7168 [ + + ]: 12 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);
7169 : :
7170 : : /* Claim remains when closing the first descriptor */
7171 : 4 : spdk_bdev_close(desc);
7172 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
7173 : 4 : CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
7174 : 4 : CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
7175 [ + + ]: 8 : UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
7176 : :
7177 : : /* Claim removed when closing the other descriptor */
7178 : 4 : spdk_bdev_close(desc2);
7179 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7180 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7181 : :
7182 : : /* Cannot claim without a key */
7183 : 4 : desc = NULL;
7184 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7185 : 4 : CU_ASSERT(rc == 0);
7186 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7187 : 4 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7188 : 4 : rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
7189 : : &bdev_ut_if);
7190 : 4 : CU_ASSERT(rc == -EINVAL);
7191 : 4 : spdk_bdev_close(desc);
7192 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7193 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
7194 : :
7195 : : /* Clean up */
7196 : 4 : free_bdev(bdev);
7197 : 4 : }
7198 : :
7199 : : static void
7200 : 4 : claim_v2_existing_writer(void)
7201 : : {
7202 : : struct spdk_bdev *bdev;
7203 : 3 : struct spdk_bdev_desc *desc;
7204 : 3 : struct spdk_bdev_desc *desc2;
7205 : 3 : struct spdk_bdev_claim_opts opts;
7206 : : enum spdk_bdev_claim_type type;
7207 : 4 : enum spdk_bdev_claim_type types[] = {
7208 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7209 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7210 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7211 : : };
7212 : : size_t i;
7213 : : int rc;
7214 : :
7215 : 4 : bdev = allocate_bdev("bdev0");
7216 : :
7217 : 4 : desc = NULL;
7218 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
7219 : 4 : CU_ASSERT(rc == 0);
7220 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7221 : 4 : desc2 = NULL;
7222 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
7223 : 4 : CU_ASSERT(rc == 0);
7224 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc2 != NULL);
7225 : :
7226 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7227 : 12 : type = types[i];
7228 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7229 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7230 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7231 : 1 : }
7232 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7233 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
7234 : 4 : CU_ASSERT(rc == -EINVAL);
7235 : 1 : } else {
7236 : 8 : CU_ASSERT(rc == -EPERM);
7237 : : }
7238 : 12 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7239 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
7240 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
7241 : 4 : CU_ASSERT(rc == -EINVAL);
7242 : 1 : } else {
7243 : 8 : CU_ASSERT(rc == -EPERM);
7244 : : }
7245 : 12 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
7246 : 3 : }
7247 : :
7248 : 4 : spdk_bdev_close(desc);
7249 : 4 : spdk_bdev_close(desc2);
7250 : :
7251 : : /* Clean up */
7252 : 4 : free_bdev(bdev);
7253 : 4 : }
7254 : :
7255 : : static void
7256 : 4 : claim_v2_existing_v1(void)
7257 : : {
7258 : : struct spdk_bdev *bdev;
7259 : 3 : struct spdk_bdev_desc *desc;
7260 : 3 : struct spdk_bdev_claim_opts opts;
7261 : : enum spdk_bdev_claim_type type;
7262 : 4 : enum spdk_bdev_claim_type types[] = {
7263 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7264 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7265 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7266 : : };
7267 : : size_t i;
7268 : : int rc;
7269 : :
7270 : 4 : bdev = allocate_bdev("bdev0");
7271 : :
7272 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7273 : 4 : CU_ASSERT(rc == 0);
7274 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
7275 : :
7276 : 4 : desc = NULL;
7277 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7278 : 4 : CU_ASSERT(rc == 0);
7279 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7280 : :
7281 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7282 : 12 : type = types[i];
7283 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7284 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7285 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7286 : 1 : }
7287 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7288 : 12 : CU_ASSERT(rc == -EPERM);
7289 : 12 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
7290 : 3 : }
7291 : :
7292 : 4 : spdk_bdev_module_release_bdev(bdev);
7293 : 4 : spdk_bdev_close(desc);
7294 : :
7295 : : /* Clean up */
7296 : 4 : free_bdev(bdev);
7297 : 4 : }
7298 : :
7299 : : static void
7300 : 4 : claim_v1_existing_v2(void)
7301 : : {
7302 : : struct spdk_bdev *bdev;
7303 : 3 : struct spdk_bdev_desc *desc;
7304 : 3 : struct spdk_bdev_claim_opts opts;
7305 : : enum spdk_bdev_claim_type type;
7306 : 4 : enum spdk_bdev_claim_type types[] = {
7307 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7308 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7309 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7310 : : };
7311 : : size_t i;
7312 : : int rc;
7313 : :
7314 : 4 : bdev = allocate_bdev("bdev0");
7315 : :
7316 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7317 : 12 : type = types[i];
7318 : :
7319 : 12 : desc = NULL;
7320 : 12 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7321 : 12 : CU_ASSERT(rc == 0);
7322 [ + + ]: 12 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7323 : :
7324 : : /* Get a v2 claim */
7325 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7326 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7327 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7328 : 1 : }
7329 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7330 : 12 : CU_ASSERT(rc == 0);
7331 : :
7332 : : /* Fail to get a v1 claim */
7333 : 12 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7334 : 12 : CU_ASSERT(rc == -EPERM);
7335 : :
7336 : 12 : spdk_bdev_close(desc);
7337 : :
7338 : : /* Now v1 succeeds */
7339 : 12 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7340 : 12 : CU_ASSERT(rc == 0)
7341 : 12 : spdk_bdev_module_release_bdev(bdev);
7342 : 3 : }
7343 : :
7344 : : /* Clean up */
7345 : 4 : free_bdev(bdev);
7346 : 4 : }
7347 : :
7348 : : static int ut_examine_claimed_init0(void);
7349 : : static int ut_examine_claimed_init1(void);
7350 : : static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
7351 : : static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
7352 : : static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
7353 : : static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);
7354 : :
7355 : : #define UT_MAX_EXAMINE_MODS 2
7356 : : struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
7357 : : {
7358 : : .name = "vbdev_ut_examine0",
7359 : : .module_init = ut_examine_claimed_init0,
7360 : : .module_fini = vbdev_ut_module_fini,
7361 : : .examine_config = ut_examine_claimed_config0,
7362 : : .examine_disk = ut_examine_claimed_disk0,
7363 : : },
7364 : : {
7365 : : .name = "vbdev_ut_examine1",
7366 : : .module_init = ut_examine_claimed_init1,
7367 : : .module_fini = vbdev_ut_module_fini,
7368 : : .examine_config = ut_examine_claimed_config1,
7369 : : .examine_disk = ut_examine_claimed_disk1,
7370 : : }
7371 : : };
7372 : :
7373 : 4 : SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
7374 : 4 : SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])
7375 : :
7376 : : struct ut_examine_claimed_ctx {
7377 : : uint32_t examine_config_count;
7378 : : uint32_t examine_disk_count;
7379 : :
7380 : : /* Claim type to take, with these options */
7381 : : enum spdk_bdev_claim_type claim_type;
7382 : : struct spdk_bdev_claim_opts claim_opts;
7383 : :
7384 : : /* Expected return value from spdk_bdev_module_claim_bdev_desc() */
7385 : : int expect_claim_err;
7386 : :
7387 : : /* Descriptor used for a claim */
7388 : : struct spdk_bdev_desc *desc;
7389 : : } examine_claimed_ctx[UT_MAX_EXAMINE_MODS];
7390 : :
7391 : : bool ut_testing_examine_claimed;
7392 : :
7393 : : /*
7394 : : * Store the order in which the modules were initialized,
7395 : : * since we have no guarantee on the order of execution of the constructors.
7396 : : * Modules are examined in reverse order of their initialization.
7397 : : */
7398 : : static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];
7399 : : static int
7400 : 328 : ut_examine_claimed_init(uint32_t modnum)
7401 : : {
7402 : : static int current = UT_MAX_EXAMINE_MODS;
7403 : :
7404 : : /* Only do this for the first initialization of the bdev framework */
7405 [ + + ]: 328 : if (current == 0) {
7406 : 320 : return 0;
7407 : : }
7408 : 8 : g_ut_examine_claimed_order[modnum] = --current;
7409 : :
7410 : 8 : return 0;
7411 : 82 : }
7412 : :
7413 : : static int
7414 : 164 : ut_examine_claimed_init0(void)
7415 : : {
7416 : 164 : return ut_examine_claimed_init(0);
7417 : : }
7418 : :
7419 : : static int
7420 : 164 : ut_examine_claimed_init1(void)
7421 : : {
7422 : 164 : return ut_examine_claimed_init(1);
7423 : : }
7424 : :
7425 : : static void
7426 : 16 : reset_examine_claimed_ctx(void)
7427 : : {
7428 : : struct ut_examine_claimed_ctx *ctx;
7429 : : uint32_t i;
7430 : :
7431 [ + + ]: 48 : for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
7432 : 32 : ctx = &examine_claimed_ctx[i];
7433 [ + + ]: 32 : if (ctx->desc != NULL) {
7434 : 20 : spdk_bdev_close(ctx->desc);
7435 : 5 : }
7436 [ - + ]: 32 : memset(ctx, 0, sizeof(*ctx));
7437 : 32 : spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
7438 : 8 : }
7439 : 16 : }
7440 : :
7441 : : static void
7442 : 664 : examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
7443 : : {
7444 [ + + ]: 664 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7445 : 664 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7446 : 664 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7447 : : int rc;
7448 : :
7449 [ + + + + ]: 664 : if (!ut_testing_examine_claimed) {
7450 : 640 : spdk_bdev_module_examine_done(module);
7451 : 640 : return;
7452 : : }
7453 : :
7454 : 24 : ctx->examine_config_count++;
7455 : :
7456 [ + + ]: 24 : if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
7457 : 25 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
7458 : 5 : &ctx->desc);
7459 : 20 : CU_ASSERT(rc == 0);
7460 : :
7461 : 20 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
7462 : 20 : CU_ASSERT(rc == ctx->expect_claim_err);
7463 : 5 : }
7464 : 24 : spdk_bdev_module_examine_done(module);
7465 : 166 : }
7466 : :
7467 : : static void
7468 : 332 : ut_examine_claimed_config0(struct spdk_bdev *bdev)
7469 : : {
7470 : 332 : examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
7471 : 332 : }
7472 : :
7473 : : static void
7474 : 332 : ut_examine_claimed_config1(struct spdk_bdev *bdev)
7475 : : {
7476 : 332 : examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
7477 : 332 : }
7478 : :
7479 : : static void
7480 : 640 : examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
7481 : : {
7482 [ + + ]: 640 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7483 : 640 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7484 : 640 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7485 : :
7486 [ + + + + ]: 640 : if (!ut_testing_examine_claimed) {
7487 : 624 : spdk_bdev_module_examine_done(module);
7488 : 624 : return;
7489 : : }
7490 : :
7491 : 16 : ctx->examine_disk_count++;
7492 : :
7493 : 16 : spdk_bdev_module_examine_done(module);
7494 : 160 : }
7495 : :
7496 : : static void
7497 : 320 : ut_examine_claimed_disk0(struct spdk_bdev *bdev)
7498 : : {
7499 : 320 : examine_claimed_disk(bdev, 0);
7500 : 320 : }
7501 : :
7502 : : static void
7503 : 320 : ut_examine_claimed_disk1(struct spdk_bdev *bdev)
7504 : : {
7505 : 320 : examine_claimed_disk(bdev, 1);
7506 : 320 : }
7507 : :
7508 : : static void
7509 : 4 : examine_claimed(void)
7510 : : {
7511 : : struct spdk_bdev *bdev;
7512 : 4 : struct spdk_bdev_module *mod = examine_claimed_mods;
7513 : 4 : struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;
7514 : :
7515 : 4 : ut_testing_examine_claimed = true;
7516 : 4 : reset_examine_claimed_ctx();
7517 : :
7518 : : /*
7519 : : * With one module claiming, both modules' examine_config should be called, but only the
7520 : : * claiming module's examine_disk should be called.
7521 : : */
7522 : 4 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7523 : 4 : bdev = allocate_bdev("bdev0");
7524 : 4 : CU_ASSERT(ctx[0].examine_config_count == 1);
7525 : 4 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7526 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7527 : 4 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7528 : 4 : CU_ASSERT(ctx[1].examine_config_count == 1);
7529 : 4 : CU_ASSERT(ctx[1].examine_disk_count == 0);
7530 : 4 : CU_ASSERT(ctx[1].desc == NULL);
7531 : 4 : reset_examine_claimed_ctx();
7532 : 4 : free_bdev(bdev);
7533 : :
7534 : : /*
7535 : : * With two modules claiming, both modules' examine_config and examine_disk should be
7536 : : * called.
7537 : : */
7538 : 4 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7539 : 4 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7540 : 4 : bdev = allocate_bdev("bdev0");
7541 : 4 : CU_ASSERT(ctx[0].examine_config_count == 1);
7542 : 4 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7543 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7544 : 4 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7545 : 4 : CU_ASSERT(ctx[1].examine_config_count == 1);
7546 : 4 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7547 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7548 : 4 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7549 : 4 : reset_examine_claimed_ctx();
7550 : 4 : free_bdev(bdev);
7551 : :
7552 : : /*
7553 : : * If two vbdev modules try to claim with conflicting claim types, the module that was added
7554 : : * last wins. The winner gets the claim and is the only one that has its examine_disk
7555 : : * callback invoked.
7556 : : */
7557 : 4 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7558 : 4 : ctx[0].expect_claim_err = -EPERM;
7559 : 4 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
7560 : 4 : bdev = allocate_bdev("bdev0");
7561 : 4 : CU_ASSERT(ctx[0].examine_config_count == 1);
7562 : 4 : CU_ASSERT(ctx[0].examine_disk_count == 0);
7563 : 4 : CU_ASSERT(ctx[1].examine_config_count == 1);
7564 : 4 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7565 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7566 : 4 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7567 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
7568 : 4 : reset_examine_claimed_ctx();
7569 : 4 : free_bdev(bdev);
7570 : :
7571 : 4 : ut_testing_examine_claimed = false;
7572 : 4 : }
7573 : :
7574 : : static void
7575 : 4 : get_numa_id(void)
7576 : : {
7577 : 4 : struct spdk_bdev bdev = {};
7578 : :
7579 : 4 : bdev.numa.id = 0;
7580 : 4 : bdev.numa.id_valid = 0;
7581 : 4 : CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);
7582 : :
7583 : 4 : bdev.numa.id_valid = 1;
7584 : 4 : CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == 0);
7585 : :
7586 : 4 : bdev.numa.id = SPDK_ENV_NUMA_ID_ANY;
7587 : 4 : CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);
7588 : 4 : }
7589 : :
7590 : : static void
7591 : 16 : get_device_stat_with_reset_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg,
7592 : : int rc)
7593 : : {
7594 : 16 : *(bool *)cb_arg = true;
7595 : 16 : }
7596 : :
7597 : : static void
7598 : 16 : get_device_stat_with_given_reset(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
7599 : : enum spdk_bdev_reset_stat_mode mode)
7600 : : {
7601 : 16 : bool done = false;
7602 : :
7603 : 16 : spdk_bdev_get_device_stat(bdev, stat, mode, get_device_stat_with_reset_cb, &done);
7604 [ + + + + ]: 32 : while (!done) { poll_threads(); }
7605 : 16 : }
7606 : :
/*
 * Verify spdk_bdev_get_device_stat() reset modes: STAT_NONE leaves the
 * counters untouched, STAT_MAXMIN clears only the min/max latency values,
 * and STAT_ALL clears every counter.
 */
static void
get_device_stat_with_reset(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct spdk_bdev_io_stat *stat;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;
	ut_init_bdev(&bdev_opts);
	bdev = allocate_bdev("bdev0");

	CU_ASSERT(spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Issue one 4096-byte read and advance the mock clock by 10 ticks before
	 * completing it, so the stats contain a known byte count and latency. */
	g_io_done = false;
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	spdk_delay_us(10);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	SPDK_CU_ASSERT_FATAL(stat != NULL);

	/* Get stat without resetting and check that it is correct */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE);
	CU_ASSERT(stat->bytes_read == 4096);
	CU_ASSERT(stat->max_read_latency_ticks == 10);

	/*
	 * Check that the stats were not reset by the previous step, then send a
	 * get request that resets only the min/max latency stats.
	 */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_MAXMIN);
	CU_ASSERT(stat->bytes_read == 4096);
	CU_ASSERT(stat->max_read_latency_ticks == 10);

	/*
	 * Check that the min/max stats were reset by the previous step, then send
	 * a get request that resets all stats.
	 */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_ALL);
	CU_ASSERT(stat->bytes_read == 4096);
	CU_ASSERT(stat->max_read_latency_ticks == 0);

	/* Check that all stats were reset by the previous step. */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE);
	CU_ASSERT(stat->bytes_read == 0);
	CU_ASSERT(stat->max_read_latency_ticks == 0);

	free(stat);
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
7669 : :
7670 : : static void
7671 : 4 : open_ext_v2_test(void)
7672 : : {
7673 : 3 : struct spdk_bdev_open_opts opts;
7674 : : struct spdk_bdev *bdev;
7675 : 3 : struct spdk_bdev_desc *desc;
7676 : : int rc;
7677 : :
7678 : 4 : bdev = allocate_bdev("bdev0");
7679 : :
7680 : 4 : rc = spdk_bdev_open_ext_v2("bdev0", true, bdev_ut_event_cb, NULL, NULL, &desc);
7681 : 4 : CU_ASSERT(rc == 0);
7682 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7683 [ - + ]: 4 : CU_ASSERT(desc->write == true);
7684 [ - + ]: 4 : CU_ASSERT(desc->opts.no_metadata == false);
7685 : :
7686 : 4 : spdk_bdev_close(desc);
7687 : :
7688 : 4 : opts.size = sizeof(opts);
7689 : 4 : opts.no_metadata = true;
7690 : :
7691 : 4 : rc = spdk_bdev_open_ext_v2("bdev0", true, bdev_ut_event_cb, NULL, &opts, &desc);
7692 : 4 : CU_ASSERT(rc == 0);
7693 [ - + ]: 4 : CU_ASSERT(desc->write == true);
7694 [ - + ]: 4 : CU_ASSERT(desc->opts.no_metadata == true);
7695 : :
7696 : 4 : spdk_bdev_close(desc);
7697 : :
7698 : 4 : free_bdev(bdev);
7699 : 4 : }
7700 : :
7701 : : static void
7702 : 4 : bdev_io_init_dif_ctx_test(void)
7703 : : {
7704 : : struct spdk_bdev *bdev;
7705 : 3 : struct spdk_bdev_io bdev_io;
7706 : : int rc;
7707 : :
7708 : 4 : bdev = allocate_bdev("bdev0");
7709 : :
7710 : : /* This is invalid because md_len should be larger than PI size. */
7711 : 4 : bdev->dif_pi_format = SPDK_DIF_PI_FORMAT_32;
7712 : 4 : bdev->blocklen = 4096 + 8;
7713 : 4 : bdev->md_len = 8;
7714 : 4 : bdev->md_interleave = true;
7715 : :
7716 : 4 : bdev_io.bdev = bdev;
7717 : :
7718 : : /* Check if initialization detects error. */
7719 : 4 : rc = bdev_io_init_dif_ctx(&bdev_io);
7720 : 4 : CU_ASSERT(rc != 0);
7721 : :
7722 : : /* Increase md_len to pass initialization check. */
7723 : 4 : bdev->blocklen = 4096 + 16;
7724 : 4 : bdev->md_len = 16;
7725 : :
7726 : 4 : rc = bdev_io_init_dif_ctx(&bdev_io);
7727 : 4 : CU_ASSERT(rc == 0);
7728 : :
7729 : 4 : free_bdev(bdev);
7730 : 4 : }
7731 : :
/*
 * Unit test entry point: register every bdev test in one CUnit suite, run it
 * on a single UT thread/core, and return the number of failed assertions as
 * the process exit code.
 *
 * NOTE: tests are executed in registration order; do not reorder the
 * CU_ADD_TEST calls casually.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);

	/* Basic open/claim/alias/stat functionality. */
	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	/* I/O splitting across boundaries, sizes, and write units. */
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	/* I/O type coverage: histograms, zeroes, compare, zcopy, etc. */
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	/* Hot-remove, open/close races, timeouts, QD sampling. */
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext_test);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	/* LBA range locking and quiesce. */
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_quiesce);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	/* Extended I/O (spdk_bdev_ext_io_opts) paths. */
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);
	CU_ADD_TEST(suite, bdev_copy);
	CU_ADD_TEST(suite, bdev_copy_split_test);
	/* Claim v1/v2 semantics and examine callbacks. */
	CU_ADD_TEST(suite, examine_locks);
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);
	CU_ADD_TEST(suite, get_numa_id);
	CU_ADD_TEST(suite, get_device_stat_with_reset);
	CU_ADD_TEST(suite, open_ext_v2_test);
	CU_ADD_TEST(suite, bdev_io_init_dif_ctx_test);

	/* Run everything on a single mocked core/thread. */
	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}
|