Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (C) 2017 Intel Corporation. All rights reserved.
3 : : * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4 : : * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 : : */
6 : :
7 : : #include "spdk_internal/cunit.h"
8 : :
9 : : #include "common/lib/ut_multithread.c"
10 : : #include "unit/lib/json_mock.c"
11 : :
12 : : #include "spdk/config.h"
13 : : /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 : : #undef SPDK_CONFIG_VTUNE
15 : :
16 : : #include "bdev/bdev.c"
17 : :
18 [ - + - + ]: 672 : DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
19 [ - + # # ]: 344 : DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
20 [ # # # # ]: 0 : DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
21 : : "test_domain");
22 [ # # # # ]: 0 : DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
23 : : (struct spdk_memory_domain *domain), 0);
24 : 0 : DEFINE_STUB_V(spdk_accel_sequence_finish,
25 : : (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
26 : 0 : DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
27 : 0 : DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
28 [ # # # # ]: 0 : DEFINE_STUB(spdk_accel_append_copy, int,
29 : : (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
30 : : uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
31 : : struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
32 : : void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
33 [ # # # # ]: 0 : DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
34 : :
35 : : static bool g_memory_domain_pull_data_called;
36 : : static bool g_memory_domain_push_data_called;
37 : : static int g_accel_io_device;
38 : :
39 [ # # ]: 0 : DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
40 : : int
41 : 20 : spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
42 : : struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
43 : : spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
44 : : {
45 : 20 : g_memory_domain_pull_data_called = true;
46 [ - + + + : 20 : HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
+ + ]
47 : 16 : cpl_cb(cpl_cb_arg, 0);
48 : 16 : return 0;
49 : 5 : }
50 : :
51 [ # # ]: 0 : DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
52 : : int
53 : 20 : spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
54 : : struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
55 : : spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
56 : : {
57 : 20 : g_memory_domain_push_data_called = true;
58 [ - + + + : 20 : HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
+ + ]
59 : 16 : cpl_cb(cpl_cb_arg, 0);
60 : 16 : return 0;
61 : 5 : }
62 : :
63 : : struct spdk_io_channel *
64 : 144 : spdk_accel_get_io_channel(void)
65 : : {
66 : 144 : return spdk_get_io_channel(&g_accel_io_device);
67 : : }
68 : :
69 : : int g_status;
70 : : int g_count;
71 : : enum spdk_bdev_event_type g_event_type1;
72 : : enum spdk_bdev_event_type g_event_type2;
73 : : enum spdk_bdev_event_type g_event_type3;
74 : : enum spdk_bdev_event_type g_event_type4;
75 : : struct spdk_histogram_data *g_histogram;
76 : : void *g_unregister_arg;
77 : : int g_unregister_rc;
78 : :
79 : : void
80 : 0 : spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
81 : : int *sc, int *sk, int *asc, int *ascq)
82 : : {
83 : 0 : }
84 : :
85 : : static int
86 : 144 : ut_accel_ch_create_cb(void *io_device, void *ctx)
87 : : {
88 : 144 : return 0;
89 : : }
90 : :
91 : : static void
92 : 144 : ut_accel_ch_destroy_cb(void *io_device, void *ctx)
93 : : {
94 : 144 : }
95 : :
96 : : static int
97 : 4 : ut_bdev_setup(void)
98 : : {
99 : 4 : spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
100 : : ut_accel_ch_destroy_cb, 0, NULL);
101 : 4 : return 0;
102 : : }
103 : :
104 : : static int
105 : 4 : ut_bdev_teardown(void)
106 : : {
107 : 4 : spdk_io_device_unregister(&g_accel_io_device, NULL);
108 : :
109 : 4 : return 0;
110 : : }
111 : :
112 : : static int
113 : 336 : stub_destruct(void *ctx)
114 : : {
115 : 336 : return 0;
116 : : }
117 : :
118 : : struct ut_expected_io {
119 : : uint8_t type;
120 : : uint64_t offset;
121 : : uint64_t src_offset;
122 : : uint64_t length;
123 : : int iovcnt;
124 : : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
125 : : void *md_buf;
126 : : TAILQ_ENTRY(ut_expected_io) link;
127 : : };
128 : :
129 : : struct bdev_ut_io {
130 : : TAILQ_ENTRY(bdev_ut_io) link;
131 : : };
132 : :
133 : : struct bdev_ut_channel {
134 : : TAILQ_HEAD(, bdev_ut_io) outstanding_io;
135 : : uint32_t outstanding_io_count;
136 : : TAILQ_HEAD(, ut_expected_io) expected_io;
137 : : };
138 : :
139 : : static bool g_io_done;
140 : : static struct spdk_bdev_io *g_bdev_io;
141 : : static enum spdk_bdev_io_status g_io_status;
142 : : static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
143 : : static uint32_t g_bdev_ut_io_device;
144 : : static struct bdev_ut_channel *g_bdev_ut_channel;
145 : : static void *g_compare_read_buf;
146 : : static uint32_t g_compare_read_buf_len;
147 : : static void *g_compare_write_buf;
148 : : static uint32_t g_compare_write_buf_len;
149 : : static void *g_compare_md_buf;
150 : : static bool g_abort_done;
151 : : static enum spdk_bdev_io_status g_abort_status;
152 : : static void *g_zcopy_read_buf;
153 : : static uint32_t g_zcopy_read_buf_len;
154 : : static void *g_zcopy_write_buf;
155 : : static uint32_t g_zcopy_write_buf_len;
156 : : static struct spdk_bdev_io *g_zcopy_bdev_io;
157 : : static uint64_t g_seek_data_offset;
158 : : static uint64_t g_seek_hole_offset;
159 : : static uint64_t g_seek_offset;
160 : :
161 : : static struct ut_expected_io *
162 : 1028 : ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
163 : : {
164 : : struct ut_expected_io *expected_io;
165 : :
166 : 1028 : expected_io = calloc(1, sizeof(*expected_io));
167 [ + + ]: 1028 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
168 : :
169 : 1028 : expected_io->type = type;
170 : 1028 : expected_io->offset = offset;
171 : 1028 : expected_io->length = length;
172 : 1028 : expected_io->iovcnt = iovcnt;
173 : :
174 : 1028 : return expected_io;
175 : : }
176 : :
177 : : static struct ut_expected_io *
178 : 84 : ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
179 : : {
180 : : struct ut_expected_io *expected_io;
181 : :
182 : 84 : expected_io = calloc(1, sizeof(*expected_io));
183 [ + + ]: 84 : SPDK_CU_ASSERT_FATAL(expected_io != NULL);
184 : :
185 : 84 : expected_io->type = type;
186 : 84 : expected_io->offset = offset;
187 : 84 : expected_io->src_offset = src_offset;
188 : 84 : expected_io->length = length;
189 : :
190 : 84 : return expected_io;
191 : : }
192 : :
193 : : static void
194 : 2184 : ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
195 : : {
196 : 2184 : expected_io->iov[pos].iov_base = base;
197 : 2184 : expected_io->iov[pos].iov_len = len;
198 : 2184 : }
199 : :
200 : : static void
201 : 1624 : stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
202 : : {
203 : 1624 : struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
204 : : struct ut_expected_io *expected_io;
205 : : struct iovec *iov, *expected_iov;
206 : : struct spdk_bdev_io *bio_to_abort;
207 : : struct bdev_ut_io *bio;
208 : : int i;
209 : :
210 : 1624 : g_bdev_io = bdev_io;
211 : :
212 [ + + + + ]: 1624 : if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
213 : 44 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
214 : :
215 : 44 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
216 : 44 : CU_ASSERT(g_compare_read_buf_len == len);
217 [ - + - + ]: 44 : memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
218 [ + + + + : 44 : if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
+ + ]
219 [ - + - + ]: 15 : memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
220 : 12 : bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
221 : 3 : }
222 : 11 : }
223 : :
224 [ + + + + ]: 1624 : if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
225 : 4 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
226 : :
227 : 4 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
228 : 4 : CU_ASSERT(g_compare_write_buf_len == len);
229 [ - + - + ]: 4 : memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
230 : 1 : }
231 : :
232 [ + + + + ]: 1624 : if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
233 : 36 : uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;
234 : :
235 : 36 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
236 : 36 : CU_ASSERT(g_compare_read_buf_len == len);
237 [ + + - + : 36 : if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
+ + ]
238 : 16 : g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
239 : 4 : }
240 [ + + + + ]: 36 : if (bdev_io->u.bdev.md_buf &&
241 [ - + - + : 15 : memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
+ + ]
242 [ + + ]: 12 : bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
243 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
244 : 1 : }
245 : 9 : }
246 : :
247 [ + + ]: 1624 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
248 [ + + ]: 56 : if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
249 [ + + ]: 52 : TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
250 : 52 : bio_to_abort = spdk_bdev_io_from_ctx(bio);
251 [ + + ]: 52 : if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
252 [ + + ]: 52 : TAILQ_REMOVE(&ch->outstanding_io, bio, link);
253 : 52 : ch->outstanding_io_count--;
254 : 52 : spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
255 : 52 : break;
256 : : }
257 : 0 : }
258 : 13 : }
259 : 14 : }
260 : :
261 [ + + ]: 1624 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
262 [ + + ]: 16 : if (bdev_io->u.bdev.zcopy.start) {
263 : 8 : g_zcopy_bdev_io = bdev_io;
264 [ + + ]: 8 : if (bdev_io->u.bdev.zcopy.populate) {
265 : : /* Start of a read */
266 : 4 : CU_ASSERT(g_zcopy_read_buf != NULL);
267 : 4 : CU_ASSERT(g_zcopy_read_buf_len > 0);
268 : 4 : bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
269 : 4 : bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
270 : 4 : bdev_io->u.bdev.iovcnt = 1;
271 : 1 : } else {
272 : : /* Start of a write */
273 : 4 : CU_ASSERT(g_zcopy_write_buf != NULL);
274 : 4 : CU_ASSERT(g_zcopy_write_buf_len > 0);
275 : 4 : bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
276 : 4 : bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
277 : 4 : bdev_io->u.bdev.iovcnt = 1;
278 : : }
279 : 2 : } else {
280 [ + + ]: 8 : if (bdev_io->u.bdev.zcopy.commit) {
281 : : /* End of write */
282 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
283 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
284 : 4 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
285 : 4 : g_zcopy_write_buf = NULL;
286 : 4 : g_zcopy_write_buf_len = 0;
287 : 1 : } else {
288 : : /* End of read */
289 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
290 : 4 : CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
291 : 4 : CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
292 : 4 : g_zcopy_read_buf = NULL;
293 : 4 : g_zcopy_read_buf_len = 0;
294 : : }
295 : : }
296 : 4 : }
297 : :
298 [ + + ]: 1624 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
299 : 4 : bdev_io->u.bdev.seek.offset = g_seek_data_offset;
300 : 1 : }
301 : :
302 [ + + ]: 1624 : if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
303 : 4 : bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
304 : 1 : }
305 : :
306 : 1624 : TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link);
307 : 1624 : ch->outstanding_io_count++;
308 : :
309 : 1624 : expected_io = TAILQ_FIRST(&ch->expected_io);
310 [ + + ]: 1624 : if (expected_io == NULL) {
311 : 512 : return;
312 : : }
313 [ + + ]: 1112 : TAILQ_REMOVE(&ch->expected_io, expected_io, link);
314 : :
315 [ + + ]: 1112 : if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
316 : 1112 : CU_ASSERT(bdev_io->type == expected_io->type);
317 : 278 : }
318 : :
319 [ + + ]: 1112 : if (expected_io->md_buf != NULL) {
320 : 112 : CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
321 : 28 : }
322 : :
323 [ + + ]: 1112 : if (expected_io->length == 0) {
324 : 0 : free(expected_io);
325 : 0 : return;
326 : : }
327 : :
328 : 1112 : CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
329 : 1112 : CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
330 [ + + ]: 1112 : if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
331 : 84 : CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
332 : 21 : }
333 : :
334 [ + + ]: 1112 : if (expected_io->iovcnt == 0) {
335 : 404 : free(expected_io);
336 : : /* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
337 : 404 : return;
338 : : }
339 : :
340 : 708 : CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
341 [ + + ]: 2892 : for (i = 0; i < expected_io->iovcnt; i++) {
342 : 2184 : expected_iov = &expected_io->iov[i];
343 [ + + ]: 2184 : if (bdev_io->internal.f.has_bounce_buf == false) {
344 : 2168 : iov = &bdev_io->u.bdev.iovs[i];
345 : 542 : } else {
346 : 16 : iov = bdev_io->internal.bounce_buf.orig_iovs;
347 : : }
348 : 2184 : CU_ASSERT(iov->iov_len == expected_iov->iov_len);
349 : 2184 : CU_ASSERT(iov->iov_base == expected_iov->iov_base);
350 : 546 : }
351 : :
352 : 708 : free(expected_io);
353 : 406 : }
354 : :
355 : : static void
356 : 212 : stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
357 : : struct spdk_bdev_io *bdev_io, bool success)
358 : : {
359 : 212 : CU_ASSERT(success == true);
360 : :
361 : 212 : stub_submit_request(_ch, bdev_io);
362 : 212 : }
363 : :
364 : : static void
365 : 212 : stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
366 : : {
367 : 265 : spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
368 : 212 : bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
369 : 212 : }
370 : :
371 : : static uint32_t
372 : 692 : stub_complete_io(uint32_t num_to_complete)
373 : : {
374 : 692 : struct bdev_ut_channel *ch = g_bdev_ut_channel;
375 : : struct bdev_ut_io *bio;
376 : : struct spdk_bdev_io *bdev_io;
377 : : static enum spdk_bdev_io_status io_status;
378 : 692 : uint32_t num_completed = 0;
379 : :
380 [ + + ]: 2260 : while (num_completed < num_to_complete) {
381 [ + + ]: 1580 : if (TAILQ_EMPTY(&ch->outstanding_io)) {
382 : 12 : break;
383 : : }
384 : 1568 : bio = TAILQ_FIRST(&ch->outstanding_io);
385 [ + + ]: 1568 : TAILQ_REMOVE(&ch->outstanding_io, bio, link);
386 : 1568 : bdev_io = spdk_bdev_io_from_ctx(bio);
387 : 1568 : ch->outstanding_io_count--;
388 [ + + ]: 1568 : io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
389 : 12 : g_io_exp_status;
390 : 1568 : spdk_bdev_io_complete(bdev_io, io_status);
391 : 1568 : num_completed++;
392 : : }
393 : :
394 : 692 : return num_completed;
395 : : }
396 : :
397 : : static struct spdk_io_channel *
398 : 144 : bdev_ut_get_io_channel(void *ctx)
399 : : {
400 : 144 : return spdk_get_io_channel(&g_bdev_ut_io_device);
401 : : }
402 : :
403 : : static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
404 : : [SPDK_BDEV_IO_TYPE_READ] = true,
405 : : [SPDK_BDEV_IO_TYPE_WRITE] = true,
406 : : [SPDK_BDEV_IO_TYPE_COMPARE] = true,
407 : : [SPDK_BDEV_IO_TYPE_UNMAP] = true,
408 : : [SPDK_BDEV_IO_TYPE_FLUSH] = true,
409 : : [SPDK_BDEV_IO_TYPE_RESET] = true,
410 : : [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
411 : : [SPDK_BDEV_IO_TYPE_NVME_IO] = true,
412 : : [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
413 : : [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
414 : : [SPDK_BDEV_IO_TYPE_ZCOPY] = true,
415 : : [SPDK_BDEV_IO_TYPE_ABORT] = true,
416 : : [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
417 : : [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
418 : : [SPDK_BDEV_IO_TYPE_COPY] = true,
419 : : };
420 : :
421 : : static void
422 : 88 : ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
423 : : {
424 : 88 : g_io_types_supported[io_type] = enable;
425 : 88 : }
426 : :
427 : : static bool
428 : 1592 : stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
429 : : {
430 [ - + ]: 1592 : return g_io_types_supported[io_type];
431 : : }
432 : :
433 : : static struct spdk_bdev_fn_table fn_table = {
434 : : .destruct = stub_destruct,
435 : : .submit_request = stub_submit_request,
436 : : .get_io_channel = bdev_ut_get_io_channel,
437 : : .io_type_supported = stub_io_type_supported,
438 : : };
439 : :
440 : : static int
441 : 144 : bdev_ut_create_ch(void *io_device, void *ctx_buf)
442 : : {
443 : 144 : struct bdev_ut_channel *ch = ctx_buf;
444 : :
445 : 144 : CU_ASSERT(g_bdev_ut_channel == NULL);
446 : 144 : g_bdev_ut_channel = ch;
447 : :
448 : 144 : TAILQ_INIT(&ch->outstanding_io);
449 : 144 : ch->outstanding_io_count = 0;
450 : 144 : TAILQ_INIT(&ch->expected_io);
451 : 144 : return 0;
452 : : }
453 : :
454 : : static void
455 : 144 : bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
456 : : {
457 : 144 : CU_ASSERT(g_bdev_ut_channel != NULL);
458 : 144 : g_bdev_ut_channel = NULL;
459 : 144 : }
460 : :
461 : : struct spdk_bdev_module bdev_ut_if;
462 : :
463 : : static int
464 : 172 : bdev_ut_module_init(void)
465 : : {
466 : 172 : spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
467 : : sizeof(struct bdev_ut_channel), NULL);
468 : 172 : spdk_bdev_module_init_done(&bdev_ut_if);
469 : 172 : return 0;
470 : : }
471 : :
472 : : static void
473 : 172 : bdev_ut_module_fini(void)
474 : : {
475 : 172 : spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
476 : 172 : }
477 : :
478 : : struct spdk_bdev_module bdev_ut_if = {
479 : : .name = "bdev_ut",
480 : : .module_init = bdev_ut_module_init,
481 : : .module_fini = bdev_ut_module_fini,
482 : : .async_init = true,
483 : : };
484 : :
485 : : static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
486 : : static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);
487 : :
488 : : static int
489 : 172 : vbdev_ut_module_init(void)
490 : : {
491 : 172 : return 0;
492 : : }
493 : :
494 : : static void
495 : 516 : vbdev_ut_module_fini(void)
496 : : {
497 : 516 : }
498 : :
499 : : static int
500 : 344 : vbdev_ut_get_ctx_size(void)
501 : : {
502 : 344 : return sizeof(struct bdev_ut_io);
503 : : }
504 : :
505 : : struct spdk_bdev_module vbdev_ut_if = {
506 : : .name = "vbdev_ut",
507 : : .module_init = vbdev_ut_module_init,
508 : : .module_fini = vbdev_ut_module_fini,
509 : : .examine_config = vbdev_ut_examine_config,
510 : : .examine_disk = vbdev_ut_examine_disk,
511 : : .get_ctx_size = vbdev_ut_get_ctx_size,
512 : : };
513 : :
514 : 4 : SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
515 : 4 : SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
516 : :
517 : : struct ut_examine_ctx {
518 : : void (*examine_config)(struct spdk_bdev *bdev);
519 : : void (*examine_disk)(struct spdk_bdev *bdev);
520 : : uint32_t examine_config_count;
521 : : uint32_t examine_disk_count;
522 : : };
523 : :
524 : : static void
525 : 332 : vbdev_ut_examine_config(struct spdk_bdev *bdev)
526 : : {
527 : 332 : struct ut_examine_ctx *ctx = bdev->ctxt;
528 : :
529 [ + + ]: 332 : if (ctx != NULL) {
530 : 12 : ctx->examine_config_count++;
531 [ + - ]: 12 : if (ctx->examine_config != NULL) {
532 : 12 : ctx->examine_config(bdev);
533 : 3 : }
534 : 3 : }
535 : :
536 : 332 : spdk_bdev_module_examine_done(&vbdev_ut_if);
537 : 332 : }
538 : :
539 : : static void
540 : 308 : vbdev_ut_examine_disk(struct spdk_bdev *bdev)
541 : : {
542 : 308 : struct ut_examine_ctx *ctx = bdev->ctxt;
543 : :
544 [ + + ]: 308 : if (ctx != NULL) {
545 : 12 : ctx->examine_disk_count++;
546 [ + - ]: 12 : if (ctx->examine_disk != NULL) {
547 : 12 : ctx->examine_disk(bdev);
548 : 3 : }
549 : 3 : }
550 : :
551 : 308 : spdk_bdev_module_examine_done(&vbdev_ut_if);
552 : 308 : }
553 : :
554 : : static void
555 : 172 : bdev_init_cb(void *arg, int rc)
556 : : {
557 : 172 : CU_ASSERT(rc == 0);
558 : 172 : }
559 : :
560 : : static void
561 : 344 : bdev_fini_cb(void *arg)
562 : : {
563 : 344 : }
564 : :
565 : : static void
566 : 172 : ut_init_bdev(struct spdk_bdev_opts *opts)
567 : : {
568 : : int rc;
569 : :
570 [ + + ]: 172 : if (opts != NULL) {
571 : 60 : rc = spdk_bdev_set_opts(opts);
572 : 60 : CU_ASSERT(rc == 0);
573 : 15 : }
574 : 172 : rc = spdk_iobuf_initialize();
575 : 172 : CU_ASSERT(rc == 0);
576 : 172 : spdk_bdev_initialize(bdev_init_cb, NULL);
577 : 172 : poll_threads();
578 : 172 : }
579 : :
580 : : static void
581 : 172 : ut_fini_bdev(void)
582 : : {
583 : 172 : spdk_bdev_finish(bdev_fini_cb, NULL);
584 : 172 : spdk_iobuf_finish(bdev_fini_cb, NULL);
585 : 172 : poll_threads();
586 : 172 : }
587 : :
588 : : static struct spdk_bdev *
589 : 304 : allocate_bdev_ctx(char *name, void *ctx)
590 : : {
591 : : struct spdk_bdev *bdev;
592 : : int rc;
593 : :
594 : 304 : bdev = calloc(1, sizeof(*bdev));
595 [ + + ]: 304 : SPDK_CU_ASSERT_FATAL(bdev != NULL);
596 : :
597 : 304 : bdev->ctxt = ctx;
598 : 304 : bdev->name = name;
599 : 304 : bdev->fn_table = &fn_table;
600 : 304 : bdev->module = &bdev_ut_if;
601 : 304 : bdev->blockcnt = 1024;
602 : 304 : bdev->blocklen = 512;
603 : :
604 : 304 : spdk_uuid_generate(&bdev->uuid);
605 : :
606 : 304 : rc = spdk_bdev_register(bdev);
607 : 304 : poll_threads();
608 : 304 : CU_ASSERT(rc == 0);
609 : :
610 : 304 : return bdev;
611 : : }
612 : :
613 : : static struct spdk_bdev *
614 : 292 : allocate_bdev(char *name)
615 : : {
616 : 292 : return allocate_bdev_ctx(name, NULL);
617 : : }
618 : :
619 : : static struct spdk_bdev *
620 : 20 : allocate_vbdev(char *name)
621 : : {
622 : : struct spdk_bdev *bdev;
623 : : int rc;
624 : :
625 : 20 : bdev = calloc(1, sizeof(*bdev));
626 [ + + ]: 20 : SPDK_CU_ASSERT_FATAL(bdev != NULL);
627 : :
628 : 20 : bdev->name = name;
629 : 20 : bdev->fn_table = &fn_table;
630 : 20 : bdev->module = &vbdev_ut_if;
631 : 20 : bdev->blockcnt = 1024;
632 : 20 : bdev->blocklen = 512;
633 : :
634 : 20 : rc = spdk_bdev_register(bdev);
635 : 20 : poll_threads();
636 : 20 : CU_ASSERT(rc == 0);
637 : :
638 : 20 : return bdev;
639 : : }
640 : :
641 : : static void
642 : 292 : free_bdev(struct spdk_bdev *bdev)
643 : : {
644 : 292 : spdk_bdev_unregister(bdev, NULL, NULL);
645 : 292 : poll_threads();
646 [ - + ]: 292 : memset(bdev, 0xFF, sizeof(*bdev));
647 : 292 : free(bdev);
648 : 292 : }
649 : :
650 : : static void
651 : 20 : free_vbdev(struct spdk_bdev *bdev)
652 : : {
653 : 20 : spdk_bdev_unregister(bdev, NULL, NULL);
654 : 20 : poll_threads();
655 [ - + ]: 20 : memset(bdev, 0xFF, sizeof(*bdev));
656 : 20 : free(bdev);
657 : 20 : }
658 : :
659 : : static void
660 : 4 : get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
661 : : {
662 : : const char *bdev_name;
663 : :
664 : 4 : CU_ASSERT(bdev != NULL);
665 : 4 : CU_ASSERT(rc == 0);
666 : 4 : bdev_name = spdk_bdev_get_name(bdev);
667 [ - + ]: 4 : CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");
668 : :
669 : 4 : free(stat);
670 : :
671 : 4 : *(bool *)cb_arg = true;
672 : 4 : }
673 : :
674 : : static void
675 : 12 : bdev_unregister_cb(void *cb_arg, int rc)
676 : : {
677 : 12 : g_unregister_arg = cb_arg;
678 : 12 : g_unregister_rc = rc;
679 : 12 : }
680 : :
681 : : static void
682 : 4 : bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
683 : : {
684 : 4 : }
685 : :
686 : : static void
687 : 16 : bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
688 : : {
689 : 16 : struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;
690 : :
691 : 16 : g_event_type1 = type;
692 [ + + ]: 16 : if (SPDK_BDEV_EVENT_REMOVE == type) {
693 : 8 : spdk_bdev_close(desc);
694 : 2 : }
695 : 16 : }
696 : :
697 : : static void
698 : 8 : bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
699 : : {
700 : 8 : struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;
701 : :
702 : 8 : g_event_type2 = type;
703 [ + + ]: 8 : if (SPDK_BDEV_EVENT_REMOVE == type) {
704 : 8 : spdk_bdev_close(desc);
705 : 2 : }
706 : 8 : }
707 : :
708 : : static void
709 : 4 : bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
710 : : {
711 : 4 : g_event_type3 = type;
712 : 4 : }
713 : :
714 : : static void
715 : 4 : bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
716 : : {
717 : 4 : g_event_type4 = type;
718 : 4 : }
719 : :
720 : : static void
721 : 16 : bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
722 : : {
723 : 16 : g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
724 : 16 : spdk_bdev_free_io(bdev_io);
725 : 16 : }
726 : :
727 : : static void
728 : 4 : get_device_stat_test(void)
729 : : {
730 : : struct spdk_bdev *bdev;
731 : : struct spdk_bdev_io_stat *stat;
732 : 3 : bool done;
733 : :
734 : 4 : bdev = allocate_bdev("bdev0");
735 : 4 : stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
736 [ - + ]: 4 : if (stat == NULL) {
737 : 0 : free_bdev(bdev);
738 : 0 : return;
739 : : }
740 : :
741 : 4 : done = false;
742 : 4 : spdk_bdev_get_device_stat(bdev, stat, SPDK_BDEV_RESET_STAT_NONE, get_device_stat_cb, &done);
743 [ + + + + ]: 8 : while (!done) { poll_threads(); }
744 : :
745 : 4 : free_bdev(bdev);
746 : 1 : }
747 : :
748 : : static void
749 : 4 : open_write_test(void)
750 : : {
751 : : struct spdk_bdev *bdev[9];
752 : 4 : struct spdk_bdev_desc *desc[9] = {};
753 : : int rc;
754 : :
755 : 4 : ut_init_bdev(NULL);
756 : :
757 : : /*
758 : : * Create a tree of bdevs to test various open w/ write cases.
759 : : *
760 : : * bdev0 through bdev3 are physical block devices, such as NVMe
761 : : * namespaces or Ceph block devices.
762 : : *
763 : : * bdev4 is a virtual bdev with multiple base bdevs. This models
764 : : * caching or RAID use cases.
765 : : *
766 : : * bdev5 through bdev7 are all virtual bdevs with the same base
767 : : * bdev (except bdev7). This models partitioning or logical volume
768 : : * use cases.
769 : : *
770 : : * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
771 : : * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
772 : : * models caching, RAID, partitioning or logical volumes use cases.
773 : : *
774 : : * bdev8 is a virtual bdev with multiple base bdevs, but these
775 : : * base bdevs are themselves virtual bdevs.
776 : : *
777 : : * bdev8
778 : : * |
779 : : * +----------+
780 : : * | |
781 : : * bdev4 bdev5 bdev6 bdev7
782 : : * | | | |
783 : : * +---+---+ +---+ + +---+---+
784 : : * | | \ | / \
785 : : * bdev0 bdev1 bdev2 bdev3
786 : : */
787 : :
788 : 4 : bdev[0] = allocate_bdev("bdev0");
789 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
790 : 4 : CU_ASSERT(rc == 0);
791 : :
792 : 4 : bdev[1] = allocate_bdev("bdev1");
793 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
794 : 4 : CU_ASSERT(rc == 0);
795 : :
796 : 4 : bdev[2] = allocate_bdev("bdev2");
797 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
798 : 4 : CU_ASSERT(rc == 0);
799 : :
800 : 4 : bdev[3] = allocate_bdev("bdev3");
801 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
802 : 4 : CU_ASSERT(rc == 0);
803 : :
804 : 4 : bdev[4] = allocate_vbdev("bdev4");
805 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
806 : 4 : CU_ASSERT(rc == 0);
807 : :
808 : 4 : bdev[5] = allocate_vbdev("bdev5");
809 : 4 : rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
810 : 4 : CU_ASSERT(rc == 0);
811 : :
812 : 4 : bdev[6] = allocate_vbdev("bdev6");
813 : :
814 : 4 : bdev[7] = allocate_vbdev("bdev7");
815 : :
816 : 4 : bdev[8] = allocate_vbdev("bdev8");
817 : :
818 : : /* Open bdev0 read-only. This should succeed. */
819 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
820 : 4 : CU_ASSERT(rc == 0);
821 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
822 : 4 : CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
823 : 4 : spdk_bdev_close(desc[0]);
824 : :
825 : : /*
826 : : * Open bdev1 read/write. This should fail since bdev1 has been claimed
827 : : * by a vbdev module.
828 : : */
829 : 4 : rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
830 : 4 : CU_ASSERT(rc == -EPERM);
831 : :
832 : : /*
833 : : * Open bdev4 read/write. This should fail since bdev3 has been claimed
834 : : * by a vbdev module.
835 : : */
836 : 4 : rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
837 : 4 : CU_ASSERT(rc == -EPERM);
838 : :
839 : : /* Open bdev4 read-only. This should succeed. */
840 : 4 : rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
841 : 4 : CU_ASSERT(rc == 0);
842 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
843 : 4 : CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
844 : 4 : spdk_bdev_close(desc[4]);
845 : :
846 : : /*
847 : : * Open bdev8 read/write. This should succeed since it is a leaf
848 : : * bdev.
849 : : */
850 : 4 : rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
851 : 4 : CU_ASSERT(rc == 0);
852 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
853 : 4 : CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
854 : 4 : spdk_bdev_close(desc[8]);
855 : :
856 : : /*
857 : : * Open bdev5 read/write. This should fail since bdev4 has been claimed
858 : : * by a vbdev module.
859 : : */
860 : 4 : rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
861 : 4 : CU_ASSERT(rc == -EPERM);
862 : :
863 : : /* Open bdev4 read-only. This should succeed. */
864 : 4 : rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
865 : 4 : CU_ASSERT(rc == 0);
866 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
867 : 4 : CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
868 : 4 : spdk_bdev_close(desc[5]);
869 : :
870 : 4 : free_vbdev(bdev[8]);
871 : :
872 : 4 : free_vbdev(bdev[5]);
873 : 4 : free_vbdev(bdev[6]);
874 : 4 : free_vbdev(bdev[7]);
875 : :
876 : 4 : free_vbdev(bdev[4]);
877 : :
878 : 4 : free_bdev(bdev[0]);
879 : 4 : free_bdev(bdev[1]);
880 : 4 : free_bdev(bdev[2]);
881 : 4 : free_bdev(bdev[3]);
882 : :
883 : 4 : ut_fini_bdev();
884 : 4 : }
885 : :
886 : : static void
887 : 4 : claim_test(void)
888 : : {
889 : : struct spdk_bdev *bdev;
890 : 3 : struct spdk_bdev_desc *desc, *open_desc;
891 : : int rc;
892 : : uint32_t count;
893 : :
894 : 4 : ut_init_bdev(NULL);
895 : :
896 : : /*
897 : : * A vbdev that uses a read-only bdev may need it to remain read-only.
898 : : * To do so, it opens the bdev read-only, then claims it without
899 : : * passing a spdk_bdev_desc.
900 : : */
901 : 4 : bdev = allocate_bdev("bdev0");
902 : 4 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
903 : 4 : CU_ASSERT(rc == 0);
904 [ - + ]: 4 : CU_ASSERT(desc->write == false);
905 : :
906 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
907 : 4 : CU_ASSERT(rc == 0);
908 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
909 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);
910 : :
911 : : /* There should be only one open descriptor and it should still be ro */
912 : 4 : count = 0;
913 [ + + ]: 8 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
914 : 4 : CU_ASSERT(open_desc == desc);
915 [ - + ]: 4 : CU_ASSERT(!open_desc->write);
916 : 4 : count++;
917 : 1 : }
918 : 4 : CU_ASSERT(count == 1);
919 : :
920 : : /* A read-only bdev is upgraded to read-write if desc is passed. */
921 : 4 : spdk_bdev_module_release_bdev(bdev);
922 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
923 : 4 : CU_ASSERT(rc == 0);
924 : 4 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
925 : 4 : CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);
926 : :
927 : : /* There should be only one open descriptor and it should be rw */
928 : 4 : count = 0;
929 [ + + ]: 8 : TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
930 : 4 : CU_ASSERT(open_desc == desc);
931 [ - + ]: 4 : CU_ASSERT(open_desc->write);
932 : 4 : count++;
933 : 1 : }
934 : 4 : CU_ASSERT(count == 1);
935 : :
936 : 4 : spdk_bdev_close(desc);
937 : 4 : free_bdev(bdev);
938 : 4 : ut_fini_bdev();
939 : 4 : }
940 : :
941 : : static void
942 : 4 : bytes_to_blocks_test(void)
943 : : {
944 : 3 : struct spdk_bdev bdev;
945 : 3 : uint64_t offset_blocks, num_blocks;
946 : :
947 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
948 : :
949 : 4 : bdev.blocklen = 512;
950 : :
951 : : /* All parameters valid */
952 : 4 : offset_blocks = 0;
953 : 4 : num_blocks = 0;
954 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
955 : 4 : CU_ASSERT(offset_blocks == 1);
956 : 4 : CU_ASSERT(num_blocks == 2);
957 : :
958 : : /* Offset not a block multiple */
959 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
960 : :
961 : : /* Length not a block multiple */
962 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
963 : :
964 : : /* In case blocklen not the power of two */
965 : 4 : bdev.blocklen = 100;
966 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
967 : 4 : CU_ASSERT(offset_blocks == 1);
968 : 4 : CU_ASSERT(num_blocks == 2);
969 : :
970 : : /* Offset not a block multiple */
971 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);
972 : :
973 : : /* Length not a block multiple */
974 : 4 : CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
975 : 4 : }
976 : :
977 : : static void
978 : 4 : num_blocks_test(void)
979 : : {
980 : : struct spdk_bdev *bdev;
981 : 4 : struct spdk_bdev_desc *desc = NULL;
982 : : int rc;
983 : :
984 : 4 : ut_init_bdev(NULL);
985 : 4 : bdev = allocate_bdev("num_blocks");
986 : :
987 : 4 : spdk_bdev_notify_blockcnt_change(bdev, 50);
988 : :
989 : : /* Growing block number */
990 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
991 : : /* Shrinking block number */
992 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);
993 : :
994 : 4 : rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
995 : 4 : CU_ASSERT(rc == 0);
996 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
997 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
998 : :
999 : : /* Growing block number */
1000 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
1001 : : /* Shrinking block number */
1002 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);
1003 : :
1004 : 4 : g_event_type1 = 0xFF;
1005 : : /* Growing block number */
1006 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);
1007 : :
1008 : 4 : poll_threads();
1009 : 4 : CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);
1010 : :
1011 : 4 : g_event_type1 = 0xFF;
1012 : : /* Growing block number and closing */
1013 : 4 : CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);
1014 : :
1015 : 4 : spdk_bdev_close(desc);
1016 : 4 : free_bdev(bdev);
1017 : 4 : ut_fini_bdev();
1018 : :
1019 : 4 : poll_threads();
1020 : :
1021 : : /* Callback is not called for closed device */
1022 : 4 : CU_ASSERT_EQUAL(g_event_type1, 0xFF);
1023 : 4 : }
1024 : :
1025 : : static void
1026 : 4 : io_valid_test(void)
1027 : : {
1028 : 3 : struct spdk_bdev bdev;
1029 : :
1030 [ - + ]: 4 : memset(&bdev, 0, sizeof(bdev));
1031 : :
1032 : 4 : bdev.blocklen = 512;
1033 : 4 : spdk_spin_init(&bdev.internal.spinlock);
1034 : :
1035 : 4 : spdk_bdev_notify_blockcnt_change(&bdev, 100);
1036 : :
1037 : : /* All parameters valid */
1038 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
1039 : :
1040 : : /* Last valid block */
1041 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);
1042 : :
1043 : : /* Offset past end of bdev */
1044 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);
1045 : :
1046 : : /* Offset + length past end of bdev */
1047 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);
1048 : :
1049 : : /* Offset near end of uint64_t range (2^64 - 1) */
1050 : 4 : CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
1051 : :
1052 : 4 : spdk_spin_destroy(&bdev.internal.spinlock);
1053 : 4 : }
1054 : :
1055 : : static void
1056 : 4 : alias_add_del_test(void)
1057 : : {
1058 : : struct spdk_bdev *bdev[3];
1059 : : int rc;
1060 : :
1061 : 4 : ut_init_bdev(NULL);
1062 : :
1063 : : /* Creating and registering bdevs */
1064 : 4 : bdev[0] = allocate_bdev("bdev0");
1065 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev[0] != 0);
1066 : :
1067 : 4 : bdev[1] = allocate_bdev("bdev1");
1068 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev[1] != 0);
1069 : :
1070 : 4 : bdev[2] = allocate_bdev("bdev2");
1071 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev[2] != 0);
1072 : :
1073 : 4 : poll_threads();
1074 : :
1075 : : /*
1076 : : * Trying adding an alias identical to name.
1077 : : * Alias is identical to name, so it can not be added to aliases list
1078 : : */
1079 : 4 : rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
1080 : 4 : CU_ASSERT(rc == -EEXIST);
1081 : :
1082 : : /*
1083 : : * Trying to add empty alias,
1084 : : * this one should fail
1085 : : */
1086 : 4 : rc = spdk_bdev_alias_add(bdev[0], NULL);
1087 : 4 : CU_ASSERT(rc == -EINVAL);
1088 : :
1089 : : /* Trying adding same alias to two different registered bdevs */
1090 : :
1091 : : /* Alias is used first time, so this one should pass */
1092 : 4 : rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
1093 : 4 : CU_ASSERT(rc == 0);
1094 : :
1095 : : /* Alias was added to another bdev, so this one should fail */
1096 : 4 : rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
1097 : 4 : CU_ASSERT(rc == -EEXIST);
1098 : :
1099 : : /* Alias is used first time, so this one should pass */
1100 : 4 : rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
1101 : 4 : CU_ASSERT(rc == 0);
1102 : :
1103 : : /* Trying removing an alias from registered bdevs */
1104 : :
1105 : : /* Alias is not on a bdev aliases list, so this one should fail */
1106 : 4 : rc = spdk_bdev_alias_del(bdev[0], "not existing");
1107 : 4 : CU_ASSERT(rc == -ENOENT);
1108 : :
1109 : : /* Alias is present on a bdev aliases list, so this one should pass */
1110 : 4 : rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
1111 : 4 : CU_ASSERT(rc == 0);
1112 : :
1113 : : /* Alias is present on a bdev aliases list, so this one should pass */
1114 : 4 : rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
1115 : 4 : CU_ASSERT(rc == 0);
1116 : :
1117 : : /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
1118 : 4 : rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
1119 : 4 : CU_ASSERT(rc != 0);
1120 : :
1121 : : /* Trying to del all alias from empty alias list */
1122 : 4 : spdk_bdev_alias_del_all(bdev[2]);
1123 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
1124 : :
1125 : : /* Trying to del all alias from non-empty alias list */
1126 : 4 : rc = spdk_bdev_alias_add(bdev[2], "alias0");
1127 : 4 : CU_ASSERT(rc == 0);
1128 : 4 : rc = spdk_bdev_alias_add(bdev[2], "alias1");
1129 : 4 : CU_ASSERT(rc == 0);
1130 : 4 : spdk_bdev_alias_del_all(bdev[2]);
1131 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
1132 : :
1133 : : /* Unregister and free bdevs */
1134 : 4 : spdk_bdev_unregister(bdev[0], NULL, NULL);
1135 : 4 : spdk_bdev_unregister(bdev[1], NULL, NULL);
1136 : 4 : spdk_bdev_unregister(bdev[2], NULL, NULL);
1137 : :
1138 : 4 : poll_threads();
1139 : :
1140 : 4 : free(bdev[0]);
1141 : 4 : free(bdev[1]);
1142 : 4 : free(bdev[2]);
1143 : :
1144 : 4 : ut_fini_bdev();
1145 : 4 : }
1146 : :
1147 : : static void
1148 : 596 : io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1149 : : {
1150 : 596 : g_io_done = true;
1151 : 596 : g_io_status = bdev_io->internal.status;
1152 [ + + + + ]: 596 : if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
1153 : 4 : (bdev_io->u.bdev.zcopy.start)) {
1154 : 8 : g_zcopy_bdev_io = bdev_io;
1155 : 2 : } else {
1156 : 588 : spdk_bdev_free_io(bdev_io);
1157 : 588 : g_zcopy_bdev_io = NULL;
1158 : : }
1159 : 596 : }
1160 : :
/* Context for exercising spdk_bdev_queue_io_wait(): carries everything
 * io_wait_cb() needs to resubmit an I/O once a bdev_io becomes available.
 */
struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;	/* embedded wait entry handed to the bdev layer */
	struct spdk_io_channel *io_ch;		/* channel to resubmit the read on */
	struct spdk_bdev_desc *desc;		/* descriptor to resubmit the read on */
	bool submitted;				/* set by io_wait_cb() once the retry was accepted */
};
1167 : :
1168 : : static void
1169 : 8 : io_wait_cb(void *arg)
1170 : : {
1171 : 8 : struct bdev_ut_io_wait_entry *entry = arg;
1172 : : int rc;
1173 : :
1174 : 8 : rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
1175 : 8 : CU_ASSERT(rc == 0);
1176 : 8 : entry->submitted = true;
1177 : 8 : }
1178 : :
1179 : : static void
1180 : 4 : bdev_io_types_test(void)
1181 : : {
1182 : : struct spdk_bdev *bdev;
1183 : 4 : struct spdk_bdev_desc *desc = NULL;
1184 : : struct spdk_io_channel *io_ch;
1185 : 4 : struct spdk_bdev_opts bdev_opts = {};
1186 : : int rc;
1187 : :
1188 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1189 : 4 : bdev_opts.bdev_io_pool_size = 4;
1190 : 4 : bdev_opts.bdev_io_cache_size = 2;
1191 : 4 : ut_init_bdev(&bdev_opts);
1192 : :
1193 : 4 : bdev = allocate_bdev("bdev0");
1194 : :
1195 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1196 : 4 : CU_ASSERT(rc == 0);
1197 : 4 : poll_threads();
1198 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1199 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1200 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1201 : 4 : CU_ASSERT(io_ch != NULL);
1202 : :
1203 : : /* WRITE and WRITE ZEROES are not supported */
1204 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
1205 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
1206 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
1207 : 4 : CU_ASSERT(rc == -ENOTSUP);
1208 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
1209 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
1210 : :
1211 : : /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
1212 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
1213 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
1214 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
1215 : 4 : rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1216 : 4 : CU_ASSERT(rc == -ENOTSUP);
1217 : 4 : rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
1218 : 4 : CU_ASSERT(rc == -ENOTSUP);
1219 : 4 : rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
1220 : 4 : CU_ASSERT(rc == -ENOTSUP);
1221 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
1222 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
1223 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);
1224 : :
1225 : 4 : spdk_put_io_channel(io_ch);
1226 : 4 : spdk_bdev_close(desc);
1227 : 4 : free_bdev(bdev);
1228 : 4 : ut_fini_bdev();
1229 : 4 : }
1230 : :
1231 : : static void
1232 : 4 : bdev_io_wait_test(void)
1233 : : {
1234 : : struct spdk_bdev *bdev;
1235 : 4 : struct spdk_bdev_desc *desc = NULL;
1236 : : struct spdk_io_channel *io_ch;
1237 : 4 : struct spdk_bdev_opts bdev_opts = {};
1238 : 3 : struct bdev_ut_io_wait_entry io_wait_entry;
1239 : 3 : struct bdev_ut_io_wait_entry io_wait_entry2;
1240 : : int rc;
1241 : :
1242 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1243 : 4 : bdev_opts.bdev_io_pool_size = 4;
1244 : 4 : bdev_opts.bdev_io_cache_size = 2;
1245 : 4 : ut_init_bdev(&bdev_opts);
1246 : :
1247 : 4 : bdev = allocate_bdev("bdev0");
1248 : :
1249 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1250 : 4 : CU_ASSERT(rc == 0);
1251 : 4 : poll_threads();
1252 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1253 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1254 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1255 : 4 : CU_ASSERT(io_ch != NULL);
1256 : :
1257 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1258 : 4 : CU_ASSERT(rc == 0);
1259 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1260 : 4 : CU_ASSERT(rc == 0);
1261 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1262 : 4 : CU_ASSERT(rc == 0);
1263 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1264 : 4 : CU_ASSERT(rc == 0);
1265 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1266 : :
1267 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1268 : 4 : CU_ASSERT(rc == -ENOMEM);
1269 : :
1270 : 4 : io_wait_entry.entry.bdev = bdev;
1271 : 4 : io_wait_entry.entry.cb_fn = io_wait_cb;
1272 : 4 : io_wait_entry.entry.cb_arg = &io_wait_entry;
1273 : 4 : io_wait_entry.io_ch = io_ch;
1274 : 4 : io_wait_entry.desc = desc;
1275 : 4 : io_wait_entry.submitted = false;
1276 : : /* Cannot use the same io_wait_entry for two different calls. */
1277 : 4 : memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
1278 : 4 : io_wait_entry2.entry.cb_arg = &io_wait_entry2;
1279 : :
1280 : : /* Queue two I/O waits. */
1281 : 4 : rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
1282 : 4 : CU_ASSERT(rc == 0);
1283 [ - + ]: 4 : CU_ASSERT(io_wait_entry.submitted == false);
1284 : 4 : rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
1285 : 4 : CU_ASSERT(rc == 0);
1286 [ - + ]: 4 : CU_ASSERT(io_wait_entry2.submitted == false);
1287 : :
1288 : 4 : stub_complete_io(1);
1289 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1290 [ - + ]: 4 : CU_ASSERT(io_wait_entry.submitted == true);
1291 [ - + ]: 4 : CU_ASSERT(io_wait_entry2.submitted == false);
1292 : :
1293 : 4 : stub_complete_io(1);
1294 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
1295 [ - + ]: 4 : CU_ASSERT(io_wait_entry2.submitted == true);
1296 : :
1297 : 4 : stub_complete_io(4);
1298 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1299 : :
1300 : 4 : spdk_put_io_channel(io_ch);
1301 : 4 : spdk_bdev_close(desc);
1302 : 4 : free_bdev(bdev);
1303 : 4 : ut_fini_bdev();
1304 : 4 : }
1305 : :
/* Verify the bdev_io_should_split() predicate against each split trigger in
 * turn: optimal_io_boundary, max_segment_size, max_num_segments, and
 * write_unit_size. Each section mutates the same bdev/bdev_io pair, so
 * later asserts depend on the accumulated state from earlier sections.
 */
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and max_size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O run right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	/* One 512-byte segment: within both max_segment_size (512*32) and max_num_segments (1). */
	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross and exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceed max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	/* iov[0] is 513 bytes while max_segment_size is 512. */
	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceed max_sizes */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	/* Switch to write-unit splitting: writes must be whole, aligned write units. */
	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}
1387 : :
1388 : : static void
1389 : 4 : bdev_io_boundary_split_test(void)
1390 : : {
1391 : : struct spdk_bdev *bdev;
1392 : 4 : struct spdk_bdev_desc *desc = NULL;
1393 : : struct spdk_io_channel *io_ch;
1394 : 4 : struct spdk_bdev_opts bdev_opts = {};
1395 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
1396 : : struct ut_expected_io *expected_io;
1397 : 4 : void *md_buf = (void *)0xFF000000;
1398 : : uint64_t i;
1399 : : int rc;
1400 : :
1401 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1402 : 4 : bdev_opts.bdev_io_pool_size = 512;
1403 : 4 : bdev_opts.bdev_io_cache_size = 64;
1404 : 4 : ut_init_bdev(&bdev_opts);
1405 : :
1406 : 4 : bdev = allocate_bdev("bdev0");
1407 : :
1408 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
1409 : 4 : CU_ASSERT(rc == 0);
1410 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
1411 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
1412 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
1413 : 4 : CU_ASSERT(io_ch != NULL);
1414 : :
1415 : 4 : bdev->optimal_io_boundary = 16;
1416 : 4 : bdev->split_on_optimal_io_boundary = false;
1417 : :
1418 : 4 : g_io_done = false;
1419 : :
1420 : : /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
1421 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
1422 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
1423 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1424 : :
1425 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1426 : 4 : CU_ASSERT(rc == 0);
1427 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1428 : :
1429 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1430 : 4 : stub_complete_io(1);
1431 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1432 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1433 : :
1434 : 4 : bdev->split_on_optimal_io_boundary = true;
1435 : 4 : bdev->md_interleave = false;
1436 : 4 : bdev->md_len = 8;
1437 : :
1438 : : /* Now test that a single-vector command is split correctly.
1439 : : * Offset 14, length 8, payload 0xF000
1440 : : * Child - Offset 14, length 2, payload 0xF000
1441 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1442 : : *
1443 : : * Set up the expected values before calling spdk_bdev_read_blocks
1444 : : */
1445 : 4 : g_io_done = false;
1446 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
1447 : 4 : expected_io->md_buf = md_buf;
1448 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1449 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1450 : :
1451 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1452 : 4 : expected_io->md_buf = md_buf + 2 * 8;
1453 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1454 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1455 : :
1456 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
1457 : 4 : rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
1458 : : 14, 8, io_done, NULL);
1459 : 4 : CU_ASSERT(rc == 0);
1460 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1461 : :
1462 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1463 : 4 : stub_complete_io(2);
1464 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1465 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1466 : :
1467 : : /* Now set up a more complex, multi-vector command that needs to be split,
1468 : : * including splitting iovecs.
1469 : : */
1470 : 4 : iov[0].iov_base = (void *)0x10000;
1471 : 4 : iov[0].iov_len = 512;
1472 : 4 : iov[1].iov_base = (void *)0x20000;
1473 : 4 : iov[1].iov_len = 20 * 512;
1474 : 4 : iov[2].iov_base = (void *)0x30000;
1475 : 4 : iov[2].iov_len = 11 * 512;
1476 : :
1477 : 4 : g_io_done = false;
1478 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1479 : 4 : expected_io->md_buf = md_buf;
1480 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1481 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1482 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1483 : :
1484 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1485 : 4 : expected_io->md_buf = md_buf + 2 * 8;
1486 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1487 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1488 : :
1489 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1490 : 4 : expected_io->md_buf = md_buf + 18 * 8;
1491 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1492 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1493 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1494 : :
1495 : 4 : rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
1496 : : 14, 32, io_done, NULL);
1497 : 4 : CU_ASSERT(rc == 0);
1498 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1499 : :
1500 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
1501 : 4 : stub_complete_io(3);
1502 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1503 : :
1504 : : /* Test multi vector command that needs to be split by strip and then needs to be
1505 : : * split further due to the capacity of child iovs.
1506 : : */
1507 [ + + ]: 260 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
1508 : 256 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1509 : 256 : iov[i].iov_len = 512;
1510 : 64 : }
1511 : :
1512 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1513 : 4 : g_io_done = false;
1514 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1515 : : SPDK_BDEV_IO_NUM_CHILD_IOV);
1516 : 4 : expected_io->md_buf = md_buf;
1517 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1518 : 128 : ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
1519 : 32 : }
1520 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1521 : :
1522 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1523 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
1524 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1525 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1526 : 160 : ut_expected_io_set_iov(expected_io, i,
1527 : 128 : (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
1528 : 32 : }
1529 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1530 : :
1531 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1532 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
1533 : 4 : CU_ASSERT(rc == 0);
1534 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1535 : :
1536 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1537 : 4 : stub_complete_io(1);
1538 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1539 : :
1540 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1541 : 4 : stub_complete_io(1);
1542 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1543 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1544 : :
1545 : : /* Test multi vector command that needs to be split by strip and then needs to be
1546 : : * split further due to the capacity of child iovs. In this case, the length of
1547 : : * the rest of iovec array with an I/O boundary is the multiple of block size.
1548 : : */
1549 : :
1550 : : /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
1551 : : * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
1552 : : */
1553 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1554 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1555 : 120 : iov[i].iov_len = 512;
1556 : 30 : }
1557 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1558 : 8 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1559 : 8 : iov[i].iov_len = 256;
1560 : 2 : }
1561 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1562 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
1563 : :
1564 : : /* Add an extra iovec to trigger split */
1565 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1566 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1567 : :
1568 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1569 : 4 : g_io_done = false;
1570 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1571 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
1572 : 4 : expected_io->md_buf = md_buf;
1573 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1574 : 150 : ut_expected_io_set_iov(expected_io, i,
1575 : 120 : (void *)((i + 1) * 0x10000), 512);
1576 : 30 : }
1577 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
1578 : 10 : ut_expected_io_set_iov(expected_io, i,
1579 : 8 : (void *)((i + 1) * 0x10000), 256);
1580 : 2 : }
1581 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1582 : :
1583 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1584 : : 1, 1);
1585 : 4 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1586 : 4 : ut_expected_io_set_iov(expected_io, 0,
1587 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
1588 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1589 : :
1590 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1591 : : 1, 1);
1592 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1593 : 4 : ut_expected_io_set_iov(expected_io, 0,
1594 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1595 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1596 : :
1597 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
1598 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1599 : 4 : CU_ASSERT(rc == 0);
1600 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1601 : :
1602 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1603 : 4 : stub_complete_io(1);
1604 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1605 : :
1606 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1607 : 4 : stub_complete_io(2);
1608 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1609 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1610 : :
1611 : : /* Test multi vector command that needs to be split by strip and then needs to be
1612 : : * split further due to the capacity of child iovs, the child request offset should
1613 : : * be rewind to last aligned offset and go success without error.
1614 : : */
1615 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1616 : 124 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1617 : 124 : iov[i].iov_len = 512;
1618 : 31 : }
1619 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1620 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1621 : :
1622 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1623 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1624 : :
1625 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1626 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1627 : :
1628 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1629 : 4 : g_io_done = false;
1630 : 4 : g_io_status = 0;
1631 : : /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
1632 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
1633 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1634 : 4 : expected_io->md_buf = md_buf;
1635 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1636 : 155 : ut_expected_io_set_iov(expected_io, i,
1637 : 124 : (void *)((i + 1) * 0x10000), 512);
1638 : 31 : }
1639 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1640 : : /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
1641 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
1642 : : 1, 2);
1643 : 4 : expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
1644 : 4 : ut_expected_io_set_iov(expected_io, 0,
1645 : : (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
1646 : 4 : ut_expected_io_set_iov(expected_io, 1,
1647 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
1648 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1649 : : /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
1650 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1651 : : 1, 1);
1652 : 4 : expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
1653 : 4 : ut_expected_io_set_iov(expected_io, 0,
1654 : : (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
1655 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1656 : :
1657 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
1658 : : 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1659 : 4 : CU_ASSERT(rc == 0);
1660 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1661 : :
1662 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1663 : 4 : stub_complete_io(1);
1664 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1665 : :
1666 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
1667 : 4 : stub_complete_io(2);
1668 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1669 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1670 : :
1671 : : /* Test multi vector command that needs to be split due to the IO boundary and
1672 : : * the capacity of child iovs. Especially test the case when the command is
1673 : : * split due to the capacity of child iovs, the tail address is not aligned with
1674 : : * block size and is rewinded to the aligned address.
1675 : : *
1676 : : * The iovecs used in read request is complex but is based on the data
1677 : : * collected in the real issue. We change the base addresses but keep the lengths
1678 : : * not to lose the credibility of the test.
1679 : : */
1680 : 4 : bdev->optimal_io_boundary = 128;
1681 : 4 : g_io_done = false;
1682 : 4 : g_io_status = 0;
1683 : :
1684 [ + + ]: 128 : for (i = 0; i < 31; i++) {
1685 : 124 : iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
1686 : 124 : iov[i].iov_len = 1024;
1687 : 31 : }
1688 : 4 : iov[31].iov_base = (void *)0xFEED1F00000;
1689 : 4 : iov[31].iov_len = 32768;
1690 : 4 : iov[32].iov_base = (void *)0xFEED2000000;
1691 : 4 : iov[32].iov_len = 160;
1692 : 4 : iov[33].iov_base = (void *)0xFEED2100000;
1693 : 4 : iov[33].iov_len = 4096;
1694 : 4 : iov[34].iov_base = (void *)0xFEED2200000;
1695 : 4 : iov[34].iov_len = 4096;
1696 : 4 : iov[35].iov_base = (void *)0xFEED2300000;
1697 : 4 : iov[35].iov_len = 4096;
1698 : 4 : iov[36].iov_base = (void *)0xFEED2400000;
1699 : 4 : iov[36].iov_len = 4096;
1700 : 4 : iov[37].iov_base = (void *)0xFEED2500000;
1701 : 4 : iov[37].iov_len = 4096;
1702 : 4 : iov[38].iov_base = (void *)0xFEED2600000;
1703 : 4 : iov[38].iov_len = 4096;
1704 : 4 : iov[39].iov_base = (void *)0xFEED2700000;
1705 : 4 : iov[39].iov_len = 4096;
1706 : 4 : iov[40].iov_base = (void *)0xFEED2800000;
1707 : 4 : iov[40].iov_len = 4096;
1708 : 4 : iov[41].iov_base = (void *)0xFEED2900000;
1709 : 4 : iov[41].iov_len = 4096;
1710 : 4 : iov[42].iov_base = (void *)0xFEED2A00000;
1711 : 4 : iov[42].iov_len = 4096;
1712 : 4 : iov[43].iov_base = (void *)0xFEED2B00000;
1713 : 4 : iov[43].iov_len = 12288;
1714 : 4 : iov[44].iov_base = (void *)0xFEED2C00000;
1715 : 4 : iov[44].iov_len = 8192;
1716 : 4 : iov[45].iov_base = (void *)0xFEED2F00000;
1717 : 4 : iov[45].iov_len = 4096;
1718 : 4 : iov[46].iov_base = (void *)0xFEED3000000;
1719 : 4 : iov[46].iov_len = 4096;
1720 : 4 : iov[47].iov_base = (void *)0xFEED3100000;
1721 : 4 : iov[47].iov_len = 4096;
1722 : 4 : iov[48].iov_base = (void *)0xFEED3200000;
1723 : 4 : iov[48].iov_len = 24576;
1724 : 4 : iov[49].iov_base = (void *)0xFEED3300000;
1725 : 4 : iov[49].iov_len = 16384;
1726 : 4 : iov[50].iov_base = (void *)0xFEED3400000;
1727 : 4 : iov[50].iov_len = 12288;
1728 : 4 : iov[51].iov_base = (void *)0xFEED3500000;
1729 : 4 : iov[51].iov_len = 4096;
1730 : 4 : iov[52].iov_base = (void *)0xFEED3600000;
1731 : 4 : iov[52].iov_len = 4096;
1732 : 4 : iov[53].iov_base = (void *)0xFEED3700000;
1733 : 4 : iov[53].iov_len = 4096;
1734 : 4 : iov[54].iov_base = (void *)0xFEED3800000;
1735 : 4 : iov[54].iov_len = 28672;
1736 : 4 : iov[55].iov_base = (void *)0xFEED3900000;
1737 : 4 : iov[55].iov_len = 20480;
1738 : 4 : iov[56].iov_base = (void *)0xFEED3A00000;
1739 : 4 : iov[56].iov_len = 4096;
1740 : 4 : iov[57].iov_base = (void *)0xFEED3B00000;
1741 : 4 : iov[57].iov_len = 12288;
1742 : 4 : iov[58].iov_base = (void *)0xFEED3C00000;
1743 : 4 : iov[58].iov_len = 4096;
1744 : 4 : iov[59].iov_base = (void *)0xFEED3D00000;
1745 : 4 : iov[59].iov_len = 4096;
1746 : 4 : iov[60].iov_base = (void *)0xFEED3E00000;
1747 : 4 : iov[60].iov_len = 352;
1748 : :
1749 : : /* The 1st child IO must be from iov[0] to iov[31] split by the capacity
1750 : : * of child iovs,
1751 : : */
1752 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
1753 : 4 : expected_io->md_buf = md_buf;
1754 [ + + ]: 132 : for (i = 0; i < 32; i++) {
1755 : 128 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1756 : 32 : }
1757 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1758 : :
1759 : : /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33]
1760 : : * split by the IO boundary requirement.
1761 : : */
1762 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1763 : 4 : expected_io->md_buf = md_buf + 126 * 8;
1764 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1765 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1766 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1767 : :
1768 : : /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1769 : : * the first 864 bytes of iov[46] split by the IO boundary requirement.
1770 : : */
1771 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1772 : 4 : expected_io->md_buf = md_buf + 128 * 8;
1773 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1774 : 4 : iov[33].iov_len - 864);
1775 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1776 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1777 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1778 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1779 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1780 : 4 : ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1781 : 4 : ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1782 : 4 : ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1783 : 4 : ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1784 : 4 : ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1785 : 4 : ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1786 : 4 : ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1787 : 4 : ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1788 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1789 : :
1790 : : /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1791 : : * first 864 bytes of iov[52] split by the IO boundary requirement.
1792 : : */
1793 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1794 : 4 : expected_io->md_buf = md_buf + 256 * 8;
1795 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1796 : 4 : iov[46].iov_len - 864);
1797 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1798 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1799 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1800 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1801 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1802 : 4 : ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1803 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1804 : :
1805 : : /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1806 : : * the first 4096 bytes of iov[57] split by the IO boundary requirement.
1807 : : */
1808 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1809 : 4 : expected_io->md_buf = md_buf + 384 * 8;
1810 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1811 : 4 : iov[52].iov_len - 864);
1812 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1813 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1814 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1815 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1816 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1817 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1818 : :
1819 : : /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1820 : : * to the first 3936 bytes of iov[58] split by the capacity of child iovs.
1821 : : */
1822 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1823 : 4 : expected_io->md_buf = md_buf + 512 * 8;
1824 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1825 : 4 : iov[57].iov_len - 4960);
1826 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1827 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1828 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1829 : :
1830 : : /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1831 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1832 : 4 : expected_io->md_buf = md_buf + 542 * 8;
1833 : 5 : ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1834 : 4 : iov[59].iov_len - 3936);
1835 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1836 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1837 : :
1838 : 4 : rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1839 : : 0, 543, io_done, NULL);
1840 : 4 : CU_ASSERT(rc == 0);
1841 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1842 : :
1843 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1844 : 4 : stub_complete_io(1);
1845 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1846 : :
1847 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1848 : 4 : stub_complete_io(5);
1849 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1850 : :
1851 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1852 : 4 : stub_complete_io(1);
1853 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1854 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1855 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1856 : :
1857 : : /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1858 : : * split, so test that.
1859 : : */
1860 : 4 : bdev->optimal_io_boundary = 15;
1861 : 4 : g_io_done = false;
1862 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1863 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1864 : :
1865 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1866 : 4 : CU_ASSERT(rc == 0);
1867 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1868 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1869 : 4 : stub_complete_io(1);
1870 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1871 : :
1872 : : /* Test an UNMAP. This should also not be split. */
1873 : 4 : bdev->optimal_io_boundary = 16;
1874 : 4 : g_io_done = false;
1875 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1876 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1877 : :
1878 : 4 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1879 : 4 : CU_ASSERT(rc == 0);
1880 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1881 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1882 : 4 : stub_complete_io(1);
1883 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1884 : :
1885 : : /* Test a FLUSH. This should also not be split. */
1886 : 4 : bdev->optimal_io_boundary = 16;
1887 : 4 : g_io_done = false;
1888 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1889 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1890 : :
1891 : 4 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1892 : 4 : CU_ASSERT(rc == 0);
1893 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1894 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1895 : 4 : stub_complete_io(1);
1896 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1897 : :
1898 : : /* Test a COPY. This should also not be split. */
1899 : 4 : bdev->optimal_io_boundary = 15;
1900 : 4 : g_io_done = false;
1901 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1902 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1903 : :
1904 : 4 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1905 : 4 : CU_ASSERT(rc == 0);
1906 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1907 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1908 : 4 : stub_complete_io(1);
1909 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1910 : :
1911 : 4 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1912 : :
1913 : : /* Children requests return an error status */
1914 : 4 : bdev->optimal_io_boundary = 16;
1915 : 4 : iov[0].iov_base = (void *)0x10000;
1916 : 4 : iov[0].iov_len = 512 * 64;
1917 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1918 : 4 : g_io_done = false;
1919 : 4 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1920 : :
1921 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1922 : 4 : CU_ASSERT(rc == 0);
1923 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1924 : 4 : stub_complete_io(4);
1925 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1926 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1927 : 4 : stub_complete_io(1);
1928 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1929 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1930 : :
1931 : : /* Test if a multi vector command terminated with failure before continuing
1932 : : * splitting process when one of child I/O failed.
1933 : : * The multi vector command is the same as the above that needs to be split by strip
1934 : : * and then needs to be split further due to the capacity of child iovs.
1935 : : */
1936 [ + + ]: 128 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1937 : 124 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1938 : 124 : iov[i].iov_len = 512;
1939 : 31 : }
1940 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1941 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1942 : :
1943 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1944 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1945 : :
1946 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1947 : 4 : iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1948 : :
1949 : 4 : bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1950 : :
1951 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1952 : 4 : g_io_done = false;
1953 : 4 : g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1954 : :
1955 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1956 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1957 : 4 : CU_ASSERT(rc == 0);
1958 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
1959 : :
1960 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1961 : 4 : stub_complete_io(1);
1962 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
1963 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1964 : :
1965 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1966 : :
1967 : : /* for this test we will create the following conditions to hit the code path where
1968 : : * we are trying to send an IO following a split that has no iovs because we had to
1969 : : * trim them for alignment reasons.
1970 : : *
1971 : : * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1972 : : * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1973 : : * position 30 and overshoot by 0x2e.
1974 : : * - That means we'll send the IO and loop back to pick up the remaining bytes at
1975 : : * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1976 : : * which eliminates that vector so we just send the first split IO with 30 vectors
1977 : : * and let the completion pick up the last 2 vectors.
1978 : : */
1979 : 4 : bdev->optimal_io_boundary = 32;
1980 : 4 : bdev->split_on_optimal_io_boundary = true;
1981 : 4 : g_io_done = false;
1982 : :
1983 : : /* Init all parent IOVs to 0x212 */
1984 [ + + ]: 140 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1985 : 136 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
1986 : 136 : iov[i].iov_len = 0x212;
1987 : 34 : }
1988 : :
1989 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1990 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1991 : : /* expect 0-29 to be 1:1 with the parent iov */
1992 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1993 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1994 : 30 : }
1995 : :
1996 : : /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
1997 : : * where 0x2e is the amount we overshot the 16K boundary
1998 : : */
1999 : 5 : ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2000 : 1 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
2001 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2002 : :
2003 : : /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
2004 : : * shortened that take it to the next boundary and then a final one to get us to
2005 : : * 0x4200 bytes for the IO.
2006 : : */
2007 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
2008 : : 1, 2);
2009 : : /* position 30 picked up the remaining bytes to the next boundary */
2010 : 5 : ut_expected_io_set_iov(expected_io, 0,
2011 : 4 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
2012 : :
2013 : : /* position 31 picked up the rest of the transfer to get us to 0x4200 */
2014 : 5 : ut_expected_io_set_iov(expected_io, 1,
2015 : 1 : (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
2016 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2017 : :
2018 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
2019 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2020 : 4 : CU_ASSERT(rc == 0);
2021 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2022 : :
2023 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2024 : 4 : stub_complete_io(1);
2025 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2026 : :
2027 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2028 : 4 : stub_complete_io(1);
2029 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2030 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2031 : :
2032 : 4 : spdk_put_io_channel(io_ch);
2033 : 4 : spdk_bdev_close(desc);
2034 : 4 : free_bdev(bdev);
2035 : 4 : ut_fini_bdev();
2036 : 4 : }
2037 : :
2038 : : static void
2039 : 4 : bdev_io_max_size_and_segment_split_test(void)
2040 : : {
2041 : : struct spdk_bdev *bdev;
2042 : 4 : struct spdk_bdev_desc *desc = NULL;
2043 : : struct spdk_io_channel *io_ch;
2044 : 4 : struct spdk_bdev_opts bdev_opts = {};
2045 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2046 : : struct ut_expected_io *expected_io;
2047 : : uint64_t i;
2048 : : int rc;
2049 : :
2050 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2051 : 4 : bdev_opts.bdev_io_pool_size = 512;
2052 : 4 : bdev_opts.bdev_io_cache_size = 64;
2053 : 4 : bdev_opts.opts_size = sizeof(bdev_opts);
2054 : 4 : ut_init_bdev(&bdev_opts);
2055 : :
2056 : 4 : bdev = allocate_bdev("bdev0");
2057 : :
2058 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2059 : 4 : CU_ASSERT(rc == 0);
2060 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2061 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
2062 : 4 : CU_ASSERT(io_ch != NULL);
2063 : :
2064 : 4 : bdev->split_on_optimal_io_boundary = false;
2065 : 4 : bdev->optimal_io_boundary = 0;
2066 : :
2067 : : /* Case 0 max_num_segments == 0.
2068 : : * but segment size 2 * 512 > 512
2069 : : */
2070 : 4 : bdev->max_segment_size = 512;
2071 : 4 : bdev->max_num_segments = 0;
2072 : 4 : g_io_done = false;
2073 : :
2074 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2075 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2076 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2077 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2078 : :
2079 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2080 : 4 : CU_ASSERT(rc == 0);
2081 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2082 : :
2083 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2084 : 4 : stub_complete_io(1);
2085 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2086 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2087 : :
2088 : : /* Case 1 max_segment_size == 0
2089 : : * but iov num 2 > 1.
2090 : : */
2091 : 4 : bdev->max_segment_size = 0;
2092 : 4 : bdev->max_num_segments = 1;
2093 : 4 : g_io_done = false;
2094 : :
2095 : 4 : iov[0].iov_base = (void *)0x10000;
2096 : 4 : iov[0].iov_len = 512;
2097 : 4 : iov[1].iov_base = (void *)0x20000;
2098 : 4 : iov[1].iov_len = 8 * 512;
2099 : :
2100 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2101 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
2102 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2103 : :
2104 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
2105 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
2106 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2107 : :
2108 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
2109 : 4 : CU_ASSERT(rc == 0);
2110 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2111 : :
2112 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2113 : 4 : stub_complete_io(2);
2114 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2115 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2116 : :
2117 : : /* Test that a non-vector command is split correctly.
2118 : : * Set up the expected values before calling spdk_bdev_read_blocks
2119 : : */
2120 : 4 : bdev->max_segment_size = 512;
2121 : 4 : bdev->max_num_segments = 1;
2122 : 4 : g_io_done = false;
2123 : :
2124 : : /* Child IO 0 */
2125 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
2126 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2127 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2128 : :
2129 : : /* Child IO 1 */
2130 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2131 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
2132 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2133 : :
2134 : : /* spdk_bdev_read_blocks will submit the first child immediately. */
2135 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2136 : 4 : CU_ASSERT(rc == 0);
2137 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2138 : :
2139 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2140 : 4 : stub_complete_io(2);
2141 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2142 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2143 : :
2144 : : /* Now set up a more complex, multi-vector command that needs to be split,
2145 : : * including splitting iovecs.
2146 : : */
2147 : 4 : bdev->max_segment_size = 2 * 512;
2148 : 4 : bdev->max_num_segments = 1;
2149 : 4 : g_io_done = false;
2150 : :
2151 : 4 : iov[0].iov_base = (void *)0x10000;
2152 : 4 : iov[0].iov_len = 2 * 512;
2153 : 4 : iov[1].iov_base = (void *)0x20000;
2154 : 4 : iov[1].iov_len = 4 * 512;
2155 : 4 : iov[2].iov_base = (void *)0x30000;
2156 : 4 : iov[2].iov_len = 6 * 512;
2157 : :
2158 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2159 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2160 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2161 : :
2162 : : /* Split iov[1].size to 2 iov entries then split the segments */
2163 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2164 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2165 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2166 : :
2167 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2168 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2169 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2170 : :
2171 : : /* Split iov[2].size to 3 iov entries then split the segments */
2172 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2173 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2174 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2175 : :
2176 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2177 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2178 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2179 : :
2180 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2181 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2182 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2183 : :
2184 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2185 : 4 : CU_ASSERT(rc == 0);
2186 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2187 : :
2188 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2189 : 4 : stub_complete_io(6);
2190 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2191 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2192 : :
2193 : : /* Test multi vector command that needs to be split by strip and then needs to be
2194 : : * split further due to the capacity of parent IO child iovs.
2195 : : */
2196 : 4 : bdev->max_segment_size = 512;
2197 : 4 : bdev->max_num_segments = 1;
2198 : 4 : g_io_done = false;
2199 : :
2200 [ + + ]: 132 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2201 : 128 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2202 : 128 : iov[i].iov_len = 512 * 2;
2203 : 32 : }
2204 : :
2205 : : /* Each input iov.size is split into 2 iovs,
2206 : : * half of the input iov can fill all child iov entries of a single IO.
2207 : : */
2208 [ + + ]: 68 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2209 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2210 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2211 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2212 : :
2213 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2214 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2215 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2216 : 16 : }
2217 : :
2218 : : /* The remaining iov is split in the second round */
2219 [ + + ]: 68 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2220 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2221 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2222 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2223 : :
2224 : 64 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2225 : 64 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2226 : 64 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2227 : 16 : }
2228 : :
2229 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2230 : : SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2231 : 4 : CU_ASSERT(rc == 0);
2232 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2233 : :
2234 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2235 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2236 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2237 : :
2238 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2239 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2240 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2241 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2242 : :
2243 : : /* A wrong case, a child IO that is divided does
2244 : : * not meet the principle of multiples of block size,
2245 : : * and exits with error
2246 : : */
2247 : 4 : bdev->max_segment_size = 512;
2248 : 4 : bdev->max_num_segments = 1;
2249 : 4 : g_io_done = false;
2250 : :
2251 : 4 : iov[0].iov_base = (void *)0x10000;
2252 : 4 : iov[0].iov_len = 512 + 256;
2253 : 4 : iov[1].iov_base = (void *)0x20000;
2254 : 4 : iov[1].iov_len = 256;
2255 : :
2256 : : /* iov[0] is split to 512 and 256.
2257 : : * 256 is less than a block size, and it is found
2258 : : * in the next round of split that it is the first child IO smaller than
2259 : : * the block size, so the error exit
2260 : : */
2261 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2262 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2263 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2264 : :
2265 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2266 : 4 : CU_ASSERT(rc == 0);
2267 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2268 : :
2269 : : /* First child IO is OK */
2270 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2271 : 4 : stub_complete_io(1);
2272 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2273 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2274 : :
2275 : : /* error exit */
2276 : 4 : stub_complete_io(1);
2277 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2278 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2279 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2280 : :
2281 : : /* Test multi vector command that needs to be split by strip and then needs to be
2282 : : * split further due to the capacity of child iovs.
2283 : : *
2284 : : * In this case, the last two iovs need to be split, but it will exceed the capacity
2285 : : * of child iovs, so it needs to wait until the first batch completed.
2286 : : */
2287 : 4 : bdev->max_segment_size = 512;
2288 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2289 : 4 : g_io_done = false;
2290 : :
2291 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2292 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2293 : 120 : iov[i].iov_len = 512;
2294 : 30 : }
2295 [ + + ]: 12 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2296 : 8 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2297 : 8 : iov[i].iov_len = 512 * 2;
2298 : 2 : }
2299 : :
2300 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2301 : : SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2302 : : /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */
2303 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2304 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2305 : 30 : }
2306 : : /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2307 : 4 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2308 : 4 : ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2309 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2310 : :
2311 : : /* Child iov entries exceed the max num of parent IO so split it in next round */
2312 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2313 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2314 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2315 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2316 : :
2317 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2318 : : SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2319 : 4 : CU_ASSERT(rc == 0);
2320 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2321 : :
2322 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2323 : 4 : stub_complete_io(1);
2324 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2325 : :
2326 : : /* Next round */
2327 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2328 : 4 : stub_complete_io(1);
2329 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2330 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2331 : :
2332 : : /* This case is similar to the previous one, but the io composed of
2333 : : * the last few entries of child iov is not enough for a blocklen, so they
2334 : : * cannot be put into this IO, but wait until the next time.
2335 : : */
2336 : 4 : bdev->max_segment_size = 512;
2337 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2338 : 4 : g_io_done = false;
2339 : :
2340 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2341 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2342 : 120 : iov[i].iov_len = 512;
2343 : 30 : }
2344 : :
2345 [ + + ]: 20 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2346 : 16 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2347 : 16 : iov[i].iov_len = 128;
2348 : 4 : }
2349 : :
2350 : : /* First child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2351 : : * because the remaining 2 iovs are not enough for a blocklen.
2352 : : */
2353 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2354 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2355 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2356 : 120 : ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2357 : 30 : }
2358 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2359 : :
2360 : : /* The second child io waits until the end of the first child io before executing.
2361 : : * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
2362 : : * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
2363 : : */
2364 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2365 : : 1, 4);
2366 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2367 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2368 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2369 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2370 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2371 : :
2372 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2373 : : SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2374 : 4 : CU_ASSERT(rc == 0);
2375 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2376 : :
2377 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2378 : 4 : stub_complete_io(1);
2379 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2380 : :
2381 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2382 : 4 : stub_complete_io(1);
2383 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2384 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2385 : :
2386 : : /* A very complicated case. Each sg entry exceeds max_segment_size and
2387 : : * needs to be split. At the same time, child io must be a multiple of blocklen.
2388 : : * At the same time, child iovcnt exceeds parent iovcnt.
2389 : : */
2390 : 4 : bdev->max_segment_size = 512 + 128;
2391 : 4 : bdev->max_num_segments = 3;
2392 : 4 : g_io_done = false;
2393 : :
2394 [ + + ]: 124 : for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2395 : 120 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2396 : 120 : iov[i].iov_len = 512 + 256;
2397 : 30 : }
2398 : :
2399 [ + + ]: 20 : for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2400 : 16 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2401 : 16 : iov[i].iov_len = 512 + 128;
2402 : 4 : }
2403 : :
2404 : : /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries.
2405 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2406 : : * Generate 9 child IOs.
2407 : : */
2408 [ + + ]: 16 : for (i = 0; i < 3; i++) {
2409 : 12 : uint32_t j = i * 4;
2410 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2411 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2412 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2413 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2414 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2415 : :
2416 : : /* Child io must be a multiple of blocklen
2417 : : * iov[j + 2] must be split. If the third entry is also added,
2418 : : * the multiple of blocklen cannot be guaranteed. But it still
2419 : : * occupies one iov entry of the parent child iov.
2420 : : */
2421 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2422 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2423 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2424 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2425 : :
2426 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2427 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2428 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2429 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2430 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2431 : 3 : }
2432 : :
2433 : : /* Child iov position at 27, the 10th child IO
2434 : : * iov entry index is 3 * 4 and offset is 3 * 6
2435 : : */
2436 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2437 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2438 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2439 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2440 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2441 : :
2442 : : /* Child iov position at 30, the 11th child IO */
2443 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2444 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2445 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2446 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2447 : :
2448 : : /* The 2nd split round and iovpos is 0, the 12th child IO */
2449 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2450 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2451 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2452 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2453 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2454 : :
2455 : : /* Consume 9 child IOs and 27 child iov entries.
2456 : : * Consume 4 parent IO iov entries per for() round and 6 block size.
2457 : : * Parent IO iov index start from 16 and block offset start from 24
2458 : : */
2459 [ + + ]: 16 : for (i = 0; i < 3; i++) {
2460 : 12 : uint32_t j = i * 4 + 16;
2461 : 12 : uint32_t offset = i * 6 + 24;
2462 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2463 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2464 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2465 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2466 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2467 : :
2468 : : /* Child io must be a multiple of blocklen
2469 : : * iov[j + 2] must be split. If the third entry is also added,
2470 : : * the multiple of blocklen cannot be guaranteed. But it still
2471 : : * occupies one iov entry of the parent child iov.
2472 : : */
2473 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2474 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2475 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2476 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2477 : :
2478 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2479 : 12 : ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2480 : 12 : ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2481 : 12 : ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2482 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2483 : 3 : }
2484 : :
2485 : : /* The 22nd child IO, child iov position at 30 */
2486 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2487 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2488 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2489 : :
2490 : : /* The third round */
2491 : : /* Here is the 23rd child IO and child iovpos is 0 */
2492 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2493 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2494 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2495 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2496 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2497 : :
2498 : : /* The 24th child IO */
2499 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2500 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2501 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2502 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2503 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2504 : :
2505 : : /* The 25th child IO */
2506 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2507 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2508 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2509 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2510 : :
2511 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2512 : : 50, io_done, NULL);
2513 : 4 : CU_ASSERT(rc == 0);
2514 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2515 : :
2516 : : /* Parent IO supports up to 32 child iovs, so it is calculated that
2517 : : * a maximum of 11 IOs can be split at a time, and the
2518 : : * splitting will continue after the first batch is over.
2519 : : */
2520 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2521 : 4 : stub_complete_io(11);
2522 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2523 : :
2524 : : /* The 2nd round */
2525 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2526 : 4 : stub_complete_io(11);
2527 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2528 : :
2529 : : /* The last round */
2530 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2531 : 4 : stub_complete_io(3);
2532 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2533 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2534 : :
2535 : : /* Test a WRITE_ZEROES. This should also not be split. */
2536 : 4 : bdev->max_segment_size = 512;
2537 : 4 : bdev->max_num_segments = 1;
2538 : 4 : g_io_done = false;
2539 : :
2540 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2541 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2542 : :
2543 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2544 : 4 : CU_ASSERT(rc == 0);
2545 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2546 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2547 : 4 : stub_complete_io(1);
2548 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2549 : :
2550 : : /* Test an UNMAP. This should also not be split. */
2551 : 4 : g_io_done = false;
2552 : :
2553 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2554 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2555 : :
2556 : 4 : rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2557 : 4 : CU_ASSERT(rc == 0);
2558 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2559 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2560 : 4 : stub_complete_io(1);
2561 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2562 : :
2563 : : /* Test a FLUSH. This should also not be split. */
2564 : 4 : g_io_done = false;
2565 : :
2566 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2567 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2568 : :
2569 : 4 : rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
2570 : 4 : CU_ASSERT(rc == 0);
2571 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2572 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2573 : 4 : stub_complete_io(1);
2574 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2575 : :
2576 : : /* Test a COPY. This should also not be split. */
2577 : 4 : g_io_done = false;
2578 : :
2579 : 4 : expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2580 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2581 : :
2582 : 4 : rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2583 : 4 : CU_ASSERT(rc == 0);
2584 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2585 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2586 : 4 : stub_complete_io(1);
2587 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2588 : :
2589 : : /* Test that IOs are split on max_rw_size */
2590 : 4 : bdev->max_rw_size = 2;
2591 : 4 : bdev->max_segment_size = 0;
2592 : 4 : bdev->max_num_segments = 0;
2593 : 4 : g_io_done = false;
2594 : :
2595 : : /* 5 blocks in a contiguous buffer */
2596 : 4 : iov[0].iov_base = (void *)0x10000;
2597 : 4 : iov[0].iov_len = 5 * 512;
2598 : :
2599 : : /* First: offset=0, num_blocks=2 */
2600 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2601 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2602 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2603 : : /* Second: offset=2, num_blocks=2 */
2604 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1);
2605 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512);
2606 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2607 : : /* Third: offset=4, num_blocks=1 */
2608 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2609 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512);
2610 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2611 : :
2612 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL);
2613 : 4 : CU_ASSERT(rc == 0);
2614 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2615 : :
2616 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2617 : 4 : stub_complete_io(3);
2618 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2619 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2620 : :
2621 : : /* Check splitting on both max_rw_size + max_num_segments */
2622 : 4 : bdev->max_rw_size = 2;
2623 : 4 : bdev->max_num_segments = 2;
2624 : 4 : bdev->max_segment_size = 0;
2625 : 4 : g_io_done = false;
2626 : :
2627 : : /* 5 blocks split across 4 iovs */
2628 : 4 : iov[0].iov_base = (void *)0x10000;
2629 : 4 : iov[0].iov_len = 3 * 512;
2630 : 4 : iov[1].iov_base = (void *)0x20000;
2631 : 4 : iov[1].iov_len = 256;
2632 : 4 : iov[2].iov_base = (void *)0x30000;
2633 : 4 : iov[2].iov_len = 256;
2634 : 4 : iov[3].iov_base = (void *)0x40000;
2635 : 4 : iov[3].iov_len = 512;
2636 : :
2637 : : /* First: offset=0, num_blocks=2, iovcnt=1 */
2638 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2639 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2640 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2641 : : /* Second: offset=2, num_blocks=1, iovcnt=1 (max_num_segments prevents submitting
2642 : : * the rest of iov[0] together with iov[1]+iov[2])
2643 : : */
2644 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
2645 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
2646 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2647 : : /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
2648 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
2649 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
2650 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
2651 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2652 : : /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
2653 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2654 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
2655 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2656 : :
2657 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
2658 : 4 : CU_ASSERT(rc == 0);
2659 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2660 : :
2661 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2662 : 4 : stub_complete_io(4);
2663 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2664 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2665 : :
2666 : : /* Check splitting on both max_rw_size + max_segment_size */
2667 : 4 : bdev->max_rw_size = 2;
2668 : 4 : bdev->max_segment_size = 512;
2669 : 4 : bdev->max_num_segments = 0;
2670 : 4 : g_io_done = false;
2671 : :
2672 : : /* 6 blocks in a contiguous buffer */
2673 : 4 : iov[0].iov_base = (void *)0x10000;
2674 : 4 : iov[0].iov_len = 6 * 512;
2675 : :
2676 : : /* We expect 3 IOs each with 2 blocks and 2 iovs */
2677 [ + + ]: 16 : for (i = 0; i < 3; ++i) {
2678 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
2679 : 12 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
2680 : 12 : ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
2681 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2682 : 3 : }
2683 : :
2684 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
2685 : 4 : CU_ASSERT(rc == 0);
2686 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2687 : :
2688 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2689 : 4 : stub_complete_io(3);
2690 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2691 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2692 : :
2693 : : /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
2694 : 4 : bdev->max_rw_size = 1;
2695 : 4 : bdev->max_segment_size = 0;
2696 : 4 : bdev->max_num_segments = 0;
2697 : 4 : g_io_done = false;
2698 : :
2699 : : /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
2700 : 4 : iov[0].iov_base = (void *)0x10000;
2701 : 4 : iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;
2702 : :
2703 : : /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
2704 [ + + ]: 16 : for (i = 0; i < 3; ++i) {
2705 : 12 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
2706 : 12 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
2707 : 12 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2708 : 3 : }
2709 : :
2710 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2711 : 4 : CU_ASSERT(rc == 0);
2712 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2713 : :
2714 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2715 : 4 : stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2716 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2717 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2718 : 4 : stub_complete_io(1);
2719 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2720 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2721 : :
2722 : 4 : spdk_put_io_channel(io_ch);
2723 : 4 : spdk_bdev_close(desc);
2724 : 4 : free_bdev(bdev);
2725 : 4 : ut_fini_bdev();
2726 : 4 : }
2727 : :
2728 : : static void
2729 : 4 : bdev_io_mix_split_test(void)
2730 : : {
2731 : : struct spdk_bdev *bdev;
2732 : 4 : struct spdk_bdev_desc *desc = NULL;
2733 : : struct spdk_io_channel *io_ch;
2734 : 4 : struct spdk_bdev_opts bdev_opts = {};
2735 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2736 : : struct ut_expected_io *expected_io;
2737 : : uint64_t i;
2738 : : int rc;
2739 : :
2740 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2741 : 4 : bdev_opts.bdev_io_pool_size = 512;
2742 : 4 : bdev_opts.bdev_io_cache_size = 64;
2743 : 4 : ut_init_bdev(&bdev_opts);
2744 : :
2745 : 4 : bdev = allocate_bdev("bdev0");
2746 : :
2747 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2748 : 4 : CU_ASSERT(rc == 0);
2749 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
2750 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
2751 : 4 : CU_ASSERT(io_ch != NULL);
2752 : :
2753 : : /* First case optimal_io_boundary == max_segment_size * max_num_segments */
2754 : 4 : bdev->split_on_optimal_io_boundary = true;
2755 : 4 : bdev->optimal_io_boundary = 16;
2756 : :
2757 : 4 : bdev->max_segment_size = 512;
2758 : 4 : bdev->max_num_segments = 16;
2759 : 4 : g_io_done = false;
2760 : :
2761 : : /* IO crossing the IO boundary requires split
2762 : : * Total 2 child IOs.
2763 : : */
2764 : :
2765 : : /* The 1st child IO split the segment_size to multiple segment entry */
2766 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2767 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2768 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2769 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2770 : :
2771 : : /* The 2nd child IO split the segment_size to multiple segment entry */
2772 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2773 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2774 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2775 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2776 : :
2777 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2778 : 4 : CU_ASSERT(rc == 0);
2779 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2780 : :
2781 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2782 : 4 : stub_complete_io(2);
2783 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2784 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2785 : :
2786 : : /* Second case optimal_io_boundary > max_segment_size * max_num_segments */
2787 : 4 : bdev->max_segment_size = 15 * 512;
2788 : 4 : bdev->max_num_segments = 1;
2789 : 4 : g_io_done = false;
2790 : :
2791 : : /* IO crossing the IO boundary requires split.
2792 : : * The 1st child IO segment size exceeds the max_segment_size,
2793 : : * So 1st child IO will be split to multiple segment entry.
2794 : : * Then it split to 2 child IOs because of the max_num_segments.
2795 : : * Total 3 child IOs.
2796 : : */
2797 : :
2798 : : /* The first 2 IOs are in an IO boundary.
2799 : : * Because the optimal_io_boundary > max_segment_size * max_num_segments
2800 : : * So it split to the first 2 IOs.
2801 : : */
2802 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2803 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2804 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2805 : :
2806 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2807 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2808 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2809 : :
2810 : : /* The 3rd Child IO is because of the io boundary */
2811 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2812 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2813 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2814 : :
2815 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2816 : 4 : CU_ASSERT(rc == 0);
2817 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2818 : :
2819 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2820 : 4 : stub_complete_io(3);
2821 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2822 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2823 : :
2824 : : /* Third case optimal_io_boundary < max_segment_size * max_num_segments */
2825 : 4 : bdev->max_segment_size = 17 * 512;
2826 : 4 : bdev->max_num_segments = 1;
2827 : 4 : g_io_done = false;
2828 : :
2829 : : /* IO crossing the IO boundary requires split.
2830 : : * Child IO does not split.
2831 : : * Total 2 child IOs.
2832 : : */
2833 : :
2834 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2835 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2836 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2837 : :
2838 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2839 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2840 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2841 : :
2842 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2843 : 4 : CU_ASSERT(rc == 0);
2844 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2845 : :
2846 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2847 : 4 : stub_complete_io(2);
2848 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2849 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2850 : :
2851 : : /* Now set up a more complex, multi-vector command that needs to be split,
2852 : : * including splitting iovecs.
2853 : : * optimal_io_boundary < max_segment_size * max_num_segments
2854 : : */
2855 : 4 : bdev->max_segment_size = 3 * 512;
2856 : 4 : bdev->max_num_segments = 6;
2857 : 4 : g_io_done = false;
2858 : :
2859 : 4 : iov[0].iov_base = (void *)0x10000;
2860 : 4 : iov[0].iov_len = 4 * 512;
2861 : 4 : iov[1].iov_base = (void *)0x20000;
2862 : 4 : iov[1].iov_len = 4 * 512;
2863 : 4 : iov[2].iov_base = (void *)0x30000;
2864 : 4 : iov[2].iov_len = 10 * 512;
2865 : :
2866 : : /* IO crossing the IO boundary requires split.
2867 : : * The 1st child IO segment size exceeds the max_segment_size and after
2868 : : * splitting segment_size, the num_segments exceeds max_num_segments.
2869 : : * So 1st child IO will be split to 2 child IOs.
2870 : : * Total 3 child IOs.
2871 : : */
2872 : :
2873 : : /* The first 2 IOs are in an IO boundary.
2874 : : * After splitting segment size the segment num exceeds.
2875 : : * So it splits to 2 child IOs.
2876 : : */
2877 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2878 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2879 : 4 : ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2880 : 4 : ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2881 : 4 : ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2882 : 4 : ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2883 : 4 : ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2884 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2885 : :
2886 : : /* The 2nd child IO has the left segment entry */
2887 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2888 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2889 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2890 : :
2891 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2892 : 4 : ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2893 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2894 : :
2895 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2896 : 4 : CU_ASSERT(rc == 0);
2897 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2898 : :
2899 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2900 : 4 : stub_complete_io(3);
2901 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2902 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2903 : :
2904 : : /* A very complicated case. Each sg entry exceeds max_segment_size
2905 : : * and split on io boundary.
2906 : : * optimal_io_boundary < max_segment_size * max_num_segments
2907 : : */
2908 : 4 : bdev->max_segment_size = 3 * 512;
2909 : 4 : bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2910 : 4 : g_io_done = false;
2911 : :
2912 [ + + ]: 84 : for (i = 0; i < 20; i++) {
2913 : 80 : iov[i].iov_base = (void *)((i + 1) * 0x10000);
2914 : 80 : iov[i].iov_len = 512 * 4;
2915 : 20 : }
2916 : :
2917 : : /* IO crossing the IO boundary requires split.
2918 : : * 80 block length can split 5 child IOs base on offset and IO boundary.
2919 : : * Each iov entry needs to be split to 2 entries because of max_segment_size
2920 : : * Total 5 child IOs.
2921 : : */
2922 : :
2923 : : /* 4 iov entries are in an IO boundary and each iov entry splits to 2.
2924 : : * So each child IO occupies 8 child iov entries.
2925 : : */
2926 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2927 [ + + ]: 20 : for (i = 0; i < 4; i++) {
2928 : 16 : int iovcnt = i * 2;
2929 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2930 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2931 : 4 : }
2932 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2933 : :
2934 : : /* 2nd child IO and total 16 child iov entries of parent IO */
2935 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2936 [ + + ]: 20 : for (i = 4; i < 8; i++) {
2937 : 16 : int iovcnt = (i - 4) * 2;
2938 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2939 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2940 : 4 : }
2941 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2942 : :
2943 : : /* 3rd child IO and total 24 child iov entries of parent IO */
2944 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2945 [ + + ]: 20 : for (i = 8; i < 12; i++) {
2946 : 16 : int iovcnt = (i - 8) * 2;
2947 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2948 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2949 : 4 : }
2950 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2951 : :
2952 : : /* 4th child IO and total 32 child iov entries of parent IO */
2953 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2954 [ + + ]: 20 : for (i = 12; i < 16; i++) {
2955 : 16 : int iovcnt = (i - 12) * 2;
2956 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2957 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2958 : 4 : }
2959 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2960 : :
2961 : : /* 5th child IO and because of the child iov entry it should be split
2962 : : * in next round.
2963 : : */
2964 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
2965 [ + + ]: 20 : for (i = 16; i < 20; i++) {
2966 : 16 : int iovcnt = (i - 16) * 2;
2967 : 16 : ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2968 : 16 : ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2969 : 4 : }
2970 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2971 : :
2972 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
2973 : 4 : CU_ASSERT(rc == 0);
2974 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2975 : :
2976 : : /* First split round */
2977 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2978 : 4 : stub_complete_io(4);
2979 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
2980 : :
2981 : : /* Second split round */
2982 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2983 : 4 : stub_complete_io(1);
2984 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
2985 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2986 : :
2987 : 4 : spdk_put_io_channel(io_ch);
2988 : 4 : spdk_bdev_close(desc);
2989 : 4 : free_bdev(bdev);
2990 : 4 : ut_fini_bdev();
2991 : 4 : }
2992 : :
2993 : : static void
2994 : 4 : bdev_io_split_with_io_wait(void)
2995 : : {
2996 : : struct spdk_bdev *bdev;
2997 : 4 : struct spdk_bdev_desc *desc = NULL;
2998 : : struct spdk_io_channel *io_ch;
2999 : : struct spdk_bdev_channel *channel;
3000 : : struct spdk_bdev_mgmt_channel *mgmt_ch;
3001 : 4 : struct spdk_bdev_opts bdev_opts = {};
3002 : 3 : struct iovec iov[3];
3003 : : struct ut_expected_io *expected_io;
3004 : : int rc;
3005 : :
3006 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3007 : 4 : bdev_opts.bdev_io_pool_size = 2;
3008 : 4 : bdev_opts.bdev_io_cache_size = 1;
3009 : 4 : ut_init_bdev(&bdev_opts);
3010 : :
3011 : 4 : bdev = allocate_bdev("bdev0");
3012 : :
3013 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3014 : 4 : CU_ASSERT(rc == 0);
3015 : 4 : CU_ASSERT(desc != NULL);
3016 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3017 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3018 : 4 : CU_ASSERT(io_ch != NULL);
3019 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
3020 : 4 : mgmt_ch = channel->shared_resource->mgmt_ch;
3021 : :
3022 : 4 : bdev->optimal_io_boundary = 16;
3023 : 4 : bdev->split_on_optimal_io_boundary = true;
3024 : :
3025 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
3026 : 4 : CU_ASSERT(rc == 0);
3027 : :
3028 : : /* Now test that a single-vector command is split correctly.
3029 : : * Offset 14, length 8, payload 0xF000
3030 : : * Child - Offset 14, length 2, payload 0xF000
3031 : : * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
3032 : : *
3033 : : * Set up the expected values before calling spdk_bdev_read_blocks
3034 : : */
3035 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
3036 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
3037 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3038 : :
3039 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
3040 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
3041 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3042 : :
3043 : : /* The following children will be submitted sequentially due to the capacity of
3044 : : * spdk_bdev_io.
3045 : : */
3046 : :
3047 : : /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
3048 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
3049 : 4 : CU_ASSERT(rc == 0);
3050 : 4 : CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3051 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3052 : :
3053 : : /* Completing the first read I/O will submit the first child */
3054 : 4 : stub_complete_io(1);
3055 : 4 : CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3056 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3057 : :
3058 : : /* Completing the first child will submit the second child */
3059 : 4 : stub_complete_io(1);
3060 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3061 : :
3062 : : /* Complete the second child I/O. This should result in our callback getting
3063 : : * invoked since the parent I/O is now complete.
3064 : : */
3065 : 4 : stub_complete_io(1);
3066 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3067 : :
3068 : : /* Now set up a more complex, multi-vector command that needs to be split,
3069 : : * including splitting iovecs.
3070 : : */
3071 : 4 : iov[0].iov_base = (void *)0x10000;
3072 : 4 : iov[0].iov_len = 512;
3073 : 4 : iov[1].iov_base = (void *)0x20000;
3074 : 4 : iov[1].iov_len = 20 * 512;
3075 : 4 : iov[2].iov_base = (void *)0x30000;
3076 : 4 : iov[2].iov_len = 11 * 512;
3077 : :
3078 : 4 : g_io_done = false;
3079 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
3080 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
3081 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
3082 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3083 : :
3084 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
3085 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
3086 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3087 : :
3088 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
3089 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
3090 : 4 : ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
3091 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3092 : :
3093 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
3094 : 4 : CU_ASSERT(rc == 0);
3095 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3096 : :
3097 : : /* The following children will be submitted sequentially due to the capacity of
3098 : : * spdk_bdev_io.
3099 : : */
3100 : :
3101 : : /* Completing the first child will submit the second child */
3102 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3103 : 4 : stub_complete_io(1);
3104 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3105 : :
3106 : : /* Completing the second child will submit the third child */
3107 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3108 : 4 : stub_complete_io(1);
3109 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3110 : :
3111 : : /* Completing the third child will result in our callback getting invoked
3112 : : * since the parent I/O is now complete.
3113 : : */
3114 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3115 : 4 : stub_complete_io(1);
3116 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3117 : :
3118 : 4 : CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
3119 : :
3120 : 4 : spdk_put_io_channel(io_ch);
3121 : 4 : spdk_bdev_close(desc);
3122 : 4 : free_bdev(bdev);
3123 : 4 : ut_fini_bdev();
3124 : 4 : }
3125 : :
3126 : : static void
3127 : 4 : bdev_io_write_unit_split_test(void)
3128 : : {
3129 : : struct spdk_bdev *bdev;
3130 : 4 : struct spdk_bdev_desc *desc = NULL;
3131 : : struct spdk_io_channel *io_ch;
3132 : 4 : struct spdk_bdev_opts bdev_opts = {};
3133 : 3 : struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4];
3134 : : struct ut_expected_io *expected_io;
3135 : : uint64_t i;
3136 : : int rc;
3137 : :
3138 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3139 : 4 : bdev_opts.bdev_io_pool_size = 512;
3140 : 4 : bdev_opts.bdev_io_cache_size = 64;
3141 : 4 : ut_init_bdev(&bdev_opts);
3142 : :
3143 : 4 : bdev = allocate_bdev("bdev0");
3144 : :
3145 : 4 : rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
3146 : 4 : CU_ASSERT(rc == 0);
3147 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
3148 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3149 : 4 : CU_ASSERT(io_ch != NULL);
3150 : :
3151 : : /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */
3152 : 4 : bdev->write_unit_size = 32;
3153 : 4 : bdev->split_on_write_unit = true;
3154 : 4 : g_io_done = false;
3155 : :
3156 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1);
3157 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512);
3158 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3159 : :
3160 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1);
3161 : 4 : ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512);
3162 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
3163 : :
3164 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3165 : 4 : CU_ASSERT(rc == 0);
3166 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3167 : :
3168 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3169 : 4 : stub_complete_io(2);
3170 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3171 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3172 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3173 : :
3174 : : /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split
3175 : : * based on write_unit_size, not optimal_io_boundary */
3176 : 4 : bdev->split_on_optimal_io_boundary = true;
3177 : 4 : bdev->optimal_io_boundary = 16;
3178 : 4 : g_io_done = false;
3179 : :
3180 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL);
3181 : 4 : CU_ASSERT(rc == 0);
3182 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3183 : :
3184 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3185 : 4 : stub_complete_io(2);
3186 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3187 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3188 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3189 : :
3190 : : /* Write I/O should fail if it is smaller than write_unit_size */
3191 : 4 : g_io_done = false;
3192 : :
3193 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL);
3194 : 4 : CU_ASSERT(rc == 0);
3195 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3196 : :
3197 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3198 : 4 : poll_threads();
3199 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3200 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3201 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3202 : :
3203 : : /* Same for I/O not aligned to write_unit_size */
3204 : 4 : g_io_done = false;
3205 : :
3206 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL);
3207 : 4 : CU_ASSERT(rc == 0);
3208 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3209 : :
3210 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3211 : 4 : poll_threads();
3212 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3213 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3214 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3215 : :
3216 : : /* Write should fail if it needs to be split but there are not enough iovs to submit
3217 : : * an entire write unit */
3218 : 4 : bdev->write_unit_size = SPDK_COUNTOF(iov) / 2;
3219 : 4 : g_io_done = false;
3220 : :
3221 [ + + ]: 516 : for (i = 0; i < SPDK_COUNTOF(iov); i++) {
3222 : 512 : iov[i].iov_base = (void *)(0x1000 + 512 * i);
3223 : 512 : iov[i].iov_len = 512;
3224 : 128 : }
3225 : :
3226 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov),
3227 : : io_done, NULL);
3228 : 4 : CU_ASSERT(rc == 0);
3229 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
3230 : :
3231 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3232 : 4 : poll_threads();
3233 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
3234 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3235 : 4 : CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3236 : :
3237 : 4 : spdk_put_io_channel(io_ch);
3238 : 4 : spdk_bdev_close(desc);
3239 : 4 : free_bdev(bdev);
3240 : 4 : ut_fini_bdev();
3241 : 4 : }
3242 : :
3243 : : static void
3244 : 4 : bdev_io_alignment(void)
3245 : : {
3246 : : struct spdk_bdev *bdev;
3247 : 4 : struct spdk_bdev_desc *desc = NULL;
3248 : : struct spdk_io_channel *io_ch;
3249 : 4 : struct spdk_bdev_opts bdev_opts = {};
3250 : : int rc;
3251 : 4 : void *buf = NULL;
3252 : 3 : struct iovec iovs[2];
3253 : : int iovcnt;
3254 : : uint64_t alignment;
3255 : :
3256 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3257 : 4 : bdev_opts.bdev_io_pool_size = 20;
3258 : 4 : bdev_opts.bdev_io_cache_size = 2;
3259 : 4 : ut_init_bdev(&bdev_opts);
3260 : :
3261 : 4 : fn_table.submit_request = stub_submit_request_get_buf;
3262 : 4 : bdev = allocate_bdev("bdev0");
3263 : :
3264 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3265 : 4 : CU_ASSERT(rc == 0);
3266 : 4 : CU_ASSERT(desc != NULL);
3267 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3268 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3269 : 4 : CU_ASSERT(io_ch != NULL);
3270 : :
3271 : : /* Create aligned buffer */
3272 [ - + ]: 4 : rc = posix_memalign(&buf, 4096, 8192);
3273 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(rc == 0);
3274 : :
3275 : : /* Pass aligned single buffer with no alignment required */
3276 : 4 : alignment = 1;
3277 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3278 : :
3279 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3280 : 4 : CU_ASSERT(rc == 0);
3281 : 4 : stub_complete_io(1);
3282 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3283 : : alignment));
3284 : :
3285 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
3286 : 4 : CU_ASSERT(rc == 0);
3287 : 4 : stub_complete_io(1);
3288 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3289 : : alignment));
3290 : :
3291 : : /* Pass unaligned single buffer with no alignment required */
3292 : 4 : alignment = 1;
3293 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3294 : :
3295 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3296 : 4 : CU_ASSERT(rc == 0);
3297 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3298 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3299 : 4 : stub_complete_io(1);
3300 : :
3301 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3302 : 4 : CU_ASSERT(rc == 0);
3303 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3304 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
3305 : 4 : stub_complete_io(1);
3306 : :
3307 : : /* Pass unaligned single buffer with 512 alignment required */
3308 : 4 : alignment = 512;
3309 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3310 : :
3311 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3312 : 4 : CU_ASSERT(rc == 0);
3313 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3314 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3315 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3316 : : alignment));
3317 : 4 : stub_complete_io(1);
3318 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3319 : :
3320 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
3321 : 4 : CU_ASSERT(rc == 0);
3322 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3323 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3324 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3325 : : alignment));
3326 : 4 : stub_complete_io(1);
3327 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3328 : :
3329 : : /* Pass unaligned single buffer with 4096 alignment required */
3330 : 4 : alignment = 4096;
3331 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3332 : :
3333 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3334 : 4 : CU_ASSERT(rc == 0);
3335 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3336 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3337 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3338 : : alignment));
3339 : 4 : stub_complete_io(1);
3340 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3341 : :
3342 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
3343 : 4 : CU_ASSERT(rc == 0);
3344 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
3345 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3346 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3347 : : alignment));
3348 : 4 : stub_complete_io(1);
3349 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3350 : :
3351 : : /* Pass aligned iovs with no alignment required */
3352 : 4 : alignment = 1;
3353 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3354 : :
3355 : 4 : iovcnt = 1;
3356 : 4 : iovs[0].iov_base = buf;
3357 : 4 : iovs[0].iov_len = 512;
3358 : :
3359 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3360 : 4 : CU_ASSERT(rc == 0);
3361 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3362 : 4 : stub_complete_io(1);
3363 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3364 : :
3365 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3366 : 4 : CU_ASSERT(rc == 0);
3367 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3368 : 4 : stub_complete_io(1);
3369 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3370 : :
3371 : : /* Pass unaligned iovs with no alignment required */
3372 : 4 : alignment = 1;
3373 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3374 : :
3375 : 4 : iovcnt = 2;
3376 : 4 : iovs[0].iov_base = buf + 16;
3377 : 4 : iovs[0].iov_len = 256;
3378 : 4 : iovs[1].iov_base = buf + 16 + 256 + 32;
3379 : 4 : iovs[1].iov_len = 256;
3380 : :
3381 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3382 : 4 : CU_ASSERT(rc == 0);
3383 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3384 : 4 : stub_complete_io(1);
3385 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3386 : :
3387 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3388 : 4 : CU_ASSERT(rc == 0);
3389 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3390 : 4 : stub_complete_io(1);
3391 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
3392 : :
3393 : : /* Pass unaligned iov with 2048 alignment required */
3394 : 4 : alignment = 2048;
3395 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3396 : :
3397 : 4 : iovcnt = 2;
3398 : 4 : iovs[0].iov_base = buf + 16;
3399 : 4 : iovs[0].iov_len = 256;
3400 : 4 : iovs[1].iov_base = buf + 16 + 256 + 32;
3401 : 4 : iovs[1].iov_len = 256;
3402 : :
3403 : 4 : rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3404 : 4 : CU_ASSERT(rc == 0);
3405 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt);
3406 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3407 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3408 : : alignment));
3409 : 4 : stub_complete_io(1);
3410 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3411 : :
3412 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3413 : 4 : CU_ASSERT(rc == 0);
3414 : 4 : CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt);
3415 : 4 : CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
3416 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3417 : : alignment));
3418 : 4 : stub_complete_io(1);
3419 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3420 : :
3421 : : /* Pass iov without allocated buffer without alignment required */
3422 : 4 : alignment = 1;
3423 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3424 : :
3425 : 4 : iovcnt = 1;
3426 : 4 : iovs[0].iov_base = NULL;
3427 : 4 : iovs[0].iov_len = 0;
3428 : :
3429 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3430 : 4 : CU_ASSERT(rc == 0);
3431 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3432 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3433 : : alignment));
3434 : 4 : stub_complete_io(1);
3435 : :
3436 : : /* Pass iov without allocated buffer with 1024 alignment required */
3437 : 4 : alignment = 1024;
3438 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3439 : :
3440 : 4 : iovcnt = 1;
3441 : 4 : iovs[0].iov_base = NULL;
3442 : 4 : iovs[0].iov_len = 0;
3443 : :
3444 : 4 : rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
3445 : 4 : CU_ASSERT(rc == 0);
3446 : 4 : CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
3447 : 4 : CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
3448 : : alignment));
3449 : 4 : stub_complete_io(1);
3450 : :
3451 : 4 : spdk_put_io_channel(io_ch);
3452 : 4 : spdk_bdev_close(desc);
3453 : 4 : free_bdev(bdev);
3454 : 4 : fn_table.submit_request = stub_submit_request;
3455 : 4 : ut_fini_bdev();
3456 : :
3457 : 4 : free(buf);
3458 : 4 : }
3459 : :
3460 : : static void
3461 : 4 : bdev_io_alignment_with_boundary(void)
3462 : : {
3463 : : struct spdk_bdev *bdev;
3464 : 4 : struct spdk_bdev_desc *desc = NULL;
3465 : : struct spdk_io_channel *io_ch;
3466 : 4 : struct spdk_bdev_opts bdev_opts = {};
3467 : : int rc;
3468 : 4 : void *buf = NULL;
3469 : 3 : struct iovec iovs[2];
3470 : : int iovcnt;
3471 : : uint64_t alignment;
3472 : :
3473 : 4 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
3474 : 4 : bdev_opts.bdev_io_pool_size = 20;
3475 : 4 : bdev_opts.bdev_io_cache_size = 2;
3476 : 4 : bdev_opts.opts_size = sizeof(bdev_opts);
3477 : 4 : ut_init_bdev(&bdev_opts);
3478 : :
3479 : 4 : fn_table.submit_request = stub_submit_request_get_buf;
3480 : 4 : bdev = allocate_bdev("bdev0");
3481 : :
3482 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
3483 : 4 : CU_ASSERT(rc == 0);
3484 : 4 : CU_ASSERT(desc != NULL);
3485 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3486 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
3487 : 4 : CU_ASSERT(io_ch != NULL);
3488 : :
3489 : : /* Create aligned buffer */
3490 [ - + ]: 4 : rc = posix_memalign(&buf, 4096, 131072);
3491 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(rc == 0);
3492 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3493 : :
3494 : : #ifdef NOTDEF
3495 : : /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
3496 : : alignment = 512;
3497 : : bdev->required_alignment = spdk_u32log2(alignment);
3498 : : bdev->optimal_io_boundary = 2;
3499 : : bdev->split_on_optimal_io_boundary = true;
3500 : :
3501 : : iovcnt = 1;
3502 : : iovs[0].iov_base = NULL;
3503 : : iovs[0].iov_len = 512 * 3;
3504 : :
3505 : : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3506 : : CU_ASSERT(rc == 0);
3507 : : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3508 : : stub_complete_io(2);
3509 : :
3510 : : /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
3511 : : alignment = 512;
3512 : : bdev->required_alignment = spdk_u32log2(alignment);
3513 : : bdev->optimal_io_boundary = 16;
3514 : : bdev->split_on_optimal_io_boundary = true;
3515 : :
3516 : : iovcnt = 1;
3517 : : iovs[0].iov_base = NULL;
3518 : : iovs[0].iov_len = 512 * 16;
3519 : :
3520 : : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
3521 : : CU_ASSERT(rc == 0);
3522 : : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3523 : : stub_complete_io(2);
3524 : :
3525 : : /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
3526 : : alignment = 512;
3527 : : bdev->required_alignment = spdk_u32log2(alignment);
3528 : : bdev->optimal_io_boundary = 128;
3529 : : bdev->split_on_optimal_io_boundary = true;
3530 : :
3531 : : iovcnt = 1;
3532 : : iovs[0].iov_base = buf + 16;
3533 : : iovs[0].iov_len = 512 * 160;
3534 : : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3535 : : CU_ASSERT(rc == 0);
3536 : : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3537 : : stub_complete_io(2);
3538 : :
3539 : : #endif
3540 : :
3541 : : /* 512 * 3 with 2 IO boundary */
3542 : 4 : alignment = 512;
3543 : 4 : bdev->required_alignment = spdk_u32log2(alignment);
3544 : 4 : bdev->optimal_io_boundary = 2;
3545 : 4 : bdev->split_on_optimal_io_boundary = true;
3546 : :
3547 : 4 : iovcnt = 2;
3548 : 4 : iovs[0].iov_base = buf + 16;
3549 : 4 : iovs[0].iov_len = 512;
3550 : 4 : iovs[1].iov_base = buf + 16 + 512 + 32;
3551 : 4 : iovs[1].iov_len = 1024;
3552 : :
3553 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3554 : 4 : CU_ASSERT(rc == 0);
3555 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3556 : 4 : stub_complete_io(2);
3557 : :
3558 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
3559 : 4 : CU_ASSERT(rc == 0);
3560 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3561 : 4 : stub_complete_io(2);
3562 : :
3563 : : /* 512 * 64 with 32 IO boundary */
3564 : 4 : bdev->optimal_io_boundary = 32;
3565 : 4 : iovcnt = 2;
3566 : 4 : iovs[0].iov_base = buf + 16;
3567 : 4 : iovs[0].iov_len = 16384;
3568 : 4 : iovs[1].iov_base = buf + 16 + 16384 + 32;
3569 : 4 : iovs[1].iov_len = 16384;
3570 : :
3571 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3572 : 4 : CU_ASSERT(rc == 0);
3573 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3574 : 4 : stub_complete_io(3);
3575 : :
3576 : 4 : rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
3577 : 4 : CU_ASSERT(rc == 0);
3578 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
3579 : 4 : stub_complete_io(3);
3580 : :
3581 : : /* 512 * 160 with 32 IO boundary */
3582 : 4 : iovcnt = 1;
3583 : 4 : iovs[0].iov_base = buf + 16;
3584 : 4 : iovs[0].iov_len = 16384 + 65536;
3585 : :
3586 : 4 : rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
3587 : 4 : CU_ASSERT(rc == 0);
3588 : 4 : CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
3589 : 4 : stub_complete_io(6);
3590 : :
3591 : 4 : spdk_put_io_channel(io_ch);
3592 : 4 : spdk_bdev_close(desc);
3593 : 4 : free_bdev(bdev);
3594 : 4 : fn_table.submit_request = stub_submit_request;
3595 : 4 : ut_fini_bdev();
3596 : :
3597 : 4 : free(buf);
3598 : 4 : }
3599 : :
3600 : : static void
3601 : 8 : histogram_status_cb(void *cb_arg, int status)
3602 : : {
3603 : 8 : g_status = status;
3604 : 8 : }
3605 : :
3606 : : static void
3607 : 12 : histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3608 : : {
3609 : 12 : g_status = status;
3610 : 12 : g_histogram = histogram;
3611 : 12 : }
3612 : :
3613 : : static void
3614 : 89088 : histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
3615 : : uint64_t total, uint64_t so_far)
3616 : : {
3617 : 89088 : g_count += count;
3618 : 89088 : }
3619 : :
3620 : : static void
3621 : 8 : histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
3622 : : {
3623 : 8 : spdk_histogram_data_fn cb_fn = cb_arg;
3624 : :
3625 : 8 : g_status = status;
3626 : :
3627 [ + + ]: 8 : if (status == 0) {
3628 : 4 : spdk_histogram_data_iterate(histogram, cb_fn, NULL);
3629 : 1 : }
3630 : 8 : }
3631 : :
3632 : : static void
3633 : 4 : bdev_histograms(void)
3634 : : {
3635 : : struct spdk_bdev *bdev;
3636 : 4 : struct spdk_bdev_desc *desc = NULL;
3637 : : struct spdk_io_channel *ch;
3638 : : struct spdk_histogram_data *histogram;
3639 : 3 : uint8_t buf[4096];
3640 : : int rc;
3641 : :
3642 : 4 : ut_init_bdev(NULL);
3643 : :
3644 : 4 : bdev = allocate_bdev("bdev");
3645 : :
3646 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
3647 : 4 : CU_ASSERT(rc == 0);
3648 : 4 : CU_ASSERT(desc != NULL);
3649 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
3650 : :
3651 : 4 : ch = spdk_bdev_get_io_channel(desc);
3652 : 4 : CU_ASSERT(ch != NULL);
3653 : :
3654 : : /* Enable histogram */
3655 : 4 : g_status = -1;
3656 : 4 : spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
3657 : 4 : poll_threads();
3658 : 4 : CU_ASSERT(g_status == 0);
3659 [ - + ]: 4 : CU_ASSERT(bdev->internal.histogram_enabled == true);
3660 : :
3661 : : /* Allocate histogram */
3662 : 4 : histogram = spdk_histogram_data_alloc();
3663 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(histogram != NULL);
3664 : :
3665 : : /* Check if histogram is zeroed */
3666 : 4 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3667 : 4 : poll_threads();
3668 : 4 : CU_ASSERT(g_status == 0);
3669 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
3670 : :
3671 : 4 : g_count = 0;
3672 : 4 : spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
3673 : :
3674 : 4 : CU_ASSERT(g_count == 0);
3675 : :
3676 : 4 : rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
3677 : 4 : CU_ASSERT(rc == 0);
3678 : :
3679 : 4 : spdk_delay_us(10);
3680 : 4 : stub_complete_io(1);
3681 : 4 : poll_threads();
3682 : :
3683 : 4 : rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
3684 : 4 : CU_ASSERT(rc == 0);
3685 : :
3686 : 4 : spdk_delay_us(10);
3687 : 4 : stub_complete_io(1);
3688 : 4 : poll_threads();
3689 : :
3690 : : /* Check if histogram gathered data from all I/O channels */
3691 : 4 : g_histogram = NULL;
3692 : 4 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3693 : 4 : poll_threads();
3694 : 4 : CU_ASSERT(g_status == 0);
3695 [ - + ]: 4 : CU_ASSERT(bdev->internal.histogram_enabled == true);
3696 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
3697 : :
3698 : 4 : g_count = 0;
3699 : 4 : spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
3700 : 4 : CU_ASSERT(g_count == 2);
3701 : :
3702 : 4 : g_count = 0;
3703 : 4 : spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count);
3704 : 4 : CU_ASSERT(g_status == 0);
3705 : 4 : CU_ASSERT(g_count == 2);
3706 : :
3707 : : /* Disable histogram */
3708 : 4 : spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
3709 : 4 : poll_threads();
3710 : 4 : CU_ASSERT(g_status == 0);
3711 [ - + ]: 4 : CU_ASSERT(bdev->internal.histogram_enabled == false);
3712 : :
3713 : : /* Try to run histogram commands on disabled bdev */
3714 : 4 : spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
3715 : 4 : poll_threads();
3716 : 4 : CU_ASSERT(g_status == -EFAULT);
3717 : :
3718 : 4 : spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL);
3719 : 4 : CU_ASSERT(g_status == -EFAULT);
3720 : :
3721 : 4 : spdk_histogram_data_free(histogram);
3722 : 4 : spdk_put_io_channel(ch);
3723 : 4 : spdk_bdev_close(desc);
3724 : 4 : free_bdev(bdev);
3725 : 4 : ut_fini_bdev();
3726 : 4 : }
3727 : :
/* Exercise spdk_bdev_comparev_blocks() and spdk_bdev_compare_blocks() for both
 * the success and the miscompare paths.
 *
 * When emulated == true, native COMPARE support is turned off in the stub, so
 * the bdev layer must service the compare itself and the backend is expected
 * to see a READ instead of a COMPARE.
 */
static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	/* Emulated compare is implemented on top of a READ; native uses COMPARE. */
	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	/* Advertise (or hide) native COMPARE support to select the code path under test. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

	/* 1. successful comparev: the stub's "on-disk" data matches the compare buffer */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare comparev: "on-disk" data is 0xbb while the compare buffer is 0xaa */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 3. successful compare (single-buffer variant) */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare compare (single-buffer variant) */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	/* Restore global stub state so subsequent tests see native COMPARE support. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3840 : :
/* Exercise compare I/O when the bdev carries per-block metadata, in both the
 * interleaved (md inside each block) and the separate-md-buffer layouts, and
 * for both native COMPARE and READ-based emulation (see _bdev_compare()).
 *
 * Geometry used: blocklen 512, md_len 8 for the interleaved cases (so one
 * block on the wire is 520 bytes); blocklen 512 with a separate 8-byte md
 * buffer per block for the split cases. Two blocks per I/O.
 */
static void
_bdev_compare_with_md(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_miscompare[1024 /* 2 * blocklen */];
	char md_buf[16];
	char md_buf_miscompare[16];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	/* Emulated compare is implemented on top of a READ; native uses COMPARE. */
	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(buf, 0xaa, sizeof(buf));
	memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare));
	/* make last md different */
	memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8);
	memset(buf_miscompare, 0xbb, sizeof(buf_miscompare));
	memset(md_buf, 0xaa, 16);
	memset(md_buf_miscompare, 0xbb, 16);

	/* Advertise (or hide) native COMPARE support to select the code path under test. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 2;

	/* interleaved md & data */
	bdev->md_interleave = true;
	bdev->md_len = 8;
	bdev->blocklen = 512 + 8;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = sizeof(buf);

	/* 1. successful compare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = sizeof(buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare with md interleaved: only the last 8 md bytes differ */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_interleaved_miscompare;
	g_compare_read_buf_len = sizeof(buf_interleaved_miscompare);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Separate data & md buffers */
	bdev->md_interleave = false;
	bdev->blocklen = 512;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = 1024;

	/* 3. successful compare with md separated */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare with md separated where md buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_compare_md_buf = md_buf_miscompare;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* 5. miscompare with md separated where buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_miscompare;
	g_compare_read_buf_len = sizeof(buf_miscompare);
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	bdev->md_len = 0;
	g_compare_md_buf = NULL;

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	/* Restore global stub state so subsequent tests see native COMPARE support. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}
3995 : :
/* Run the compare tests against a bdev that natively supports COMPARE I/O. */
static void
bdev_compare(void)
{
	_bdev_compare(false);
	_bdev_compare_with_md(false);
}
4002 : :
/* Run the compare tests against a bdev without COMPARE support, so the bdev
 * layer must emulate compare via READ + memcmp.
 */
static void
bdev_compare_emulated(void)
{
	_bdev_compare(true);
	_bdev_compare_with_md(true);
}
4009 : :
/* Exercise spdk_bdev_comparev_and_writev_blocks() on a bdev without native
 * COMPARE-AND-WRITE support, so the bdev layer emulates it as a locked
 * READ + compare + WRITE sequence.
 *
 * The poll_threads() calls around the stub completions are load-bearing:
 * the range lock is taken (and later released) asynchronously, so each
 * phase must be polled before the next stub I/O can be driven.
 */
static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	/* Force the emulated (read-then-write) path. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

	/* Happy path: expect a READ (the compare) followed by a WRITE. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	/* After only the READ completed, the overall I/O must not be done yet. */
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* The write payload must have reached the stub's write buffer. */
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	/* Test miscompare: "on-disk" data 0xcc does not match compare buffer 0xaa */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	/* No WRITE must have been submitted after the miscompare. */
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	/* Restore global stub state for subsequent tests. */
	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}
4116 : :
4117 : : static void
4118 : 4 : bdev_write_zeroes(void)
4119 : : {
4120 : : struct spdk_bdev *bdev;
4121 : 4 : struct spdk_bdev_desc *desc = NULL;
4122 : : struct spdk_io_channel *ioch;
4123 : : struct ut_expected_io *expected_io;
4124 : : uint64_t offset, num_io_blocks, num_blocks;
4125 : : uint32_t num_completed, num_requests;
4126 : : int rc;
4127 : :
4128 : 4 : ut_init_bdev(NULL);
4129 : 4 : bdev = allocate_bdev("bdev");
4130 : :
4131 : 4 : rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
4132 : 4 : CU_ASSERT_EQUAL(rc, 0);
4133 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc != NULL);
4134 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4135 : 4 : ioch = spdk_bdev_get_io_channel(desc);
4136 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(ioch != NULL);
4137 : :
4138 : 4 : fn_table.submit_request = stub_submit_request;
4139 : 4 : g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
4140 : :
4141 : : /* First test that if the bdev supports write_zeroes, the request won't be split */
4142 : 4 : bdev->md_len = 0;
4143 : 4 : bdev->blocklen = 4096;
4144 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4145 : :
4146 : 4 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
4147 : 4 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4148 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4149 : 4 : CU_ASSERT_EQUAL(rc, 0);
4150 : 4 : num_completed = stub_complete_io(1);
4151 : 4 : CU_ASSERT_EQUAL(num_completed, 1);
4152 : :
4153 : : /* Check that if write zeroes is not supported it'll be replaced by regular writes */
4154 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
4155 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4156 [ - + ]: 4 : num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
4157 : 4 : num_requests = 2;
4158 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
4159 : :
4160 [ + + ]: 12 : for (offset = 0; offset < num_requests; ++offset) {
4161 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4162 : 2 : offset * num_io_blocks, num_io_blocks, 0);
4163 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4164 : 2 : }
4165 : :
4166 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4167 : 4 : CU_ASSERT_EQUAL(rc, 0);
4168 : 4 : num_completed = stub_complete_io(num_requests);
4169 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4170 : :
4171 : : /* Check that the splitting is correct if bdev has interleaved metadata */
4172 : 4 : bdev->md_interleave = true;
4173 : 4 : bdev->md_len = 64;
4174 : 4 : bdev->blocklen = 4096 + 64;
4175 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4176 [ - + ]: 4 : num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
4177 : :
4178 : 4 : num_requests = offset = 0;
4179 [ + + ]: 12 : while (offset < num_blocks) {
4180 [ + + + + : 8 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
- + ]
4181 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4182 : 2 : offset, num_io_blocks, 0);
4183 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4184 : 8 : offset += num_io_blocks;
4185 : 8 : num_requests++;
4186 : : }
4187 : :
4188 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4189 : 4 : CU_ASSERT_EQUAL(rc, 0);
4190 : 4 : num_completed = stub_complete_io(num_requests);
4191 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4192 : 4 : num_completed = stub_complete_io(num_requests);
4193 [ - + ]: 4 : assert(num_completed == 0);
4194 : :
4195 : : /* Check the the same for separate metadata buffer */
4196 : 4 : bdev->md_interleave = false;
4197 : 4 : bdev->md_len = 64;
4198 : 4 : bdev->blocklen = 4096;
4199 : 4 : bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE);
4200 : :
4201 : 4 : num_requests = offset = 0;
4202 [ + + ]: 12 : while (offset < num_blocks) {
4203 [ + + + - : 8 : num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
- + ]
4204 : 8 : expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
4205 : 2 : offset, num_io_blocks, 0);
4206 : 8 : expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
4207 : 8 : TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
4208 : 8 : offset += num_io_blocks;
4209 : 8 : num_requests++;
4210 : : }
4211 : :
4212 : 4 : rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
4213 : 4 : CU_ASSERT_EQUAL(rc, 0);
4214 : 4 : num_completed = stub_complete_io(num_requests);
4215 : 4 : CU_ASSERT_EQUAL(num_completed, num_requests);
4216 : :
4217 : 4 : ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
4218 : 4 : spdk_put_io_channel(ioch);
4219 : 4 : spdk_bdev_close(desc);
4220 : 4 : free_bdev(bdev);
4221 : 4 : ut_fini_bdev();
4222 : 4 : }
4223 : :
/* Exercise the zcopy write flow: spdk_bdev_zcopy_start() with populate=false
 * hands out a backend-owned buffer for the caller to fill, then
 * spdk_bdev_zcopy_end() with commit=true writes it out.
 *
 * The g_zcopy_read_* globals are primed with sentinel values and asserted
 * unchanged at the end, proving the write path never touched the read-side
 * stub buffers.
 */
static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	/* The iov is intentionally empty; zcopy start is expected to fill it in. */
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Sentinels: the read-side stub state must remain untouched throughout. */
	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy read buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4305 : :
/* Exercise the zcopy read flow: spdk_bdev_zcopy_start() with populate=true
 * hands out a backend-owned buffer already filled with data, then
 * spdk_bdev_zcopy_end() with commit=false releases it without writing.
 *
 * Mirror image of bdev_zcopy_write(): here the g_zcopy_write_* globals carry
 * sentinel values that must remain untouched.
 */
static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	/* The iov is intentionally empty; zcopy start is expected to fill it in. */
	iov.iov_base = NULL;
	iov.iov_len = 0;

	/* Sentinels: the write-side stub state must remain untouched throughout. */
	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check the g_zcopy are reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy write buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4389 : :
4390 : : static void
4391 : 4 : bdev_open_while_hotremove(void)
4392 : : {
4393 : : struct spdk_bdev *bdev;
4394 : 4 : struct spdk_bdev_desc *desc[2] = {};
4395 : : int rc;
4396 : :
4397 : 4 : bdev = allocate_bdev("bdev");
4398 : :
4399 : 4 : rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
4400 : 4 : CU_ASSERT(rc == 0);
4401 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
4402 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));
4403 : :
4404 : 4 : spdk_bdev_unregister(bdev, NULL, NULL);
4405 : : /* Bdev unregister is handled asynchronously. Poll thread to complete. */
4406 : 4 : poll_threads();
4407 : :
4408 : 4 : rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
4409 : 4 : CU_ASSERT(rc == -ENODEV);
4410 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
4411 : :
4412 : 4 : spdk_bdev_close(desc[0]);
4413 : 4 : free_bdev(bdev);
4414 : 4 : }
4415 : :
/* Verify that closing a descriptor while its bdev's unregister is still in
 * flight (a) suppresses the remove event callback for that descriptor and
 * (b) still lets the unregister callback run to completion.
 */
static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;		/* sentinel: bdev_open_cb1 would overwrite this */
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}
4452 : :
/* Verify spdk_bdev_open_ext(): a NULL event callback is rejected with
 * -EINVAL, and each successfully opened descriptor receives
 * SPDK_BDEV_EVENT_REMOVE when the bdev is hot-unplugged.
 */
static void
bdev_open_ext_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	/* An event callback is mandatory for open_ext. */
	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	/* Sentinels: the open callbacks record the event type they receive. */
	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}
4486 : :
/* Open four descriptors, unregister the bdev, and verify that the
 * unregister completion is deferred until every descriptor is closed.
 */
static void
bdev_open_ext_unregister(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	struct spdk_bdev_desc *desc3 = NULL;
	struct spdk_bdev_desc *desc4 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	/* An event callback is required, so this open must fail. */
	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;
	g_event_type3 = 0xFF;
	g_event_type4 = 0xFF;

	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);

	/*
	 * Unregister is handled asynchronously and event callback
	 * (i.e., above bdev_open_cbN) will be called.
	 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not
	 * close the desc3 and desc4 so that the bdev is not closed.
	 */
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);

	/* Check that unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc3. As desc4 is still opened there, the
	 * unregister callback is still delayed to execute.
	 */
	spdk_bdev_close(desc3);
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc4 to trigger the ongoing bdev unregister
	 * operation after last desc is closed.
	 */
	spdk_bdev_close(desc4);

	/* Poll the thread for the async unregister operation */
	poll_threads();

	/* Check that unregister callback is executed */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
	poll_threads();
}
4567 : :
/* Details of an I/O reported by the timeout callback, captured for
 * later inspection by the test.
 */
struct timeout_io_cb_arg {
	struct iovec iov;	/* first iovec of the timed-out I/O */
	uint8_t type;		/* SPDK_BDEV_IO_TYPE_* of the timed-out I/O */
};
4572 : :
4573 : : static int
4574 : 56 : bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
4575 : : {
4576 : : struct spdk_bdev_io *bdev_io;
4577 : 56 : int n = 0;
4578 : :
4579 [ - + ]: 56 : if (!ch) {
4580 : 0 : return -1;
4581 : : }
4582 : :
4583 [ + + ]: 116 : TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
4584 : 60 : n++;
4585 : 15 : }
4586 : :
4587 : 56 : return n;
4588 : 14 : }
4589 : :
4590 : : static void
4591 : 12 : bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
4592 : : {
4593 : 12 : struct timeout_io_cb_arg *ctx = cb_arg;
4594 : :
4595 : 12 : ctx->type = bdev_io->type;
4596 : 12 : ctx->iov.iov_base = bdev_io->iov.iov_base;
4597 : 12 : ctx->iov.iov_len = bdev_io->iov.iov_len;
4598 : 12 : }
4599 : :
/* Verify spdk_bdev_set_timeout(): the per-channel io_submitted list
 * tracks only user-submitted (and split-generated) I/Os, the timeout
 * poller can be registered/re-armed/disabled, and timed-out read, write,
 * split and reset I/Os are reported to the callback.
 */
static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is the part1.
	 * We will check the bdev_ch->io_submitted list
	 * to make sure that it can link IOs and only the user submitted IOs
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted IOs including IO that are generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is part2
	 * Test the desc timeout poller register
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* This is the part3
	 * We will test to catch timeout IO and check whether the IO is
	 * the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30 reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child complete in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
4741 : :
/* Verify spdk_bdev_set_qd_sampling_period(): the sampling poller is
 * registered/re-armed/disabled asynchronously (new_period takes effect
 * only after the current sample completes), and sampling coexists with
 * submitted I/O and reset.
 */
static void
bdev_set_qd_sampling(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* This is the part1.
	 * We will check the bdev_ch->io_submitted list
	 * to make sure that it can link IOs and only the user submitted IOs
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* This is the part2.
	 * Test the bdev's qd poller register
	 */
	/* 1st Successfully set the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 10);
	CU_ASSERT(bdev->internal.new_period == 10);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);

	/* 2nd Change the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 20);
	CU_ASSERT(bdev->internal.new_period == 20);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 3rd Change the qd sampling period and verify qd_poll_in_progress */
	spdk_delay_us(20);
	poll_thread_times(0, 1);
	CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
	spdk_bdev_set_qd_sampling_period(bdev, 30);
	CU_ASSERT(bdev->internal.new_period == 30);
	CU_ASSERT(bdev->internal.period == 20);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 4th Disable the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 0);
	CU_ASSERT(bdev->internal.new_period == 0);
	CU_ASSERT(bdev->internal.period == 30);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller == NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
	CU_ASSERT(bdev->internal.qd_desc == NULL);

	/* This is the part3.
	 * We will test the submitted IO and reset works
	 * properly with the qd sampling.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	spdk_bdev_set_qd_sampling_period(bdev, 1);
	poll_threads();

	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	poll_threads();

	/* Close the desc */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);

	/* Complete the submitted IO and reset */
	stub_complete_io(2);
	poll_threads();

	free_bdev(bdev);
	ut_fini_bdev();
}
4844 : :
4845 : : static void
4846 : 4 : lba_range_overlap(void)
4847 : : {
4848 : 3 : struct lba_range r1, r2;
4849 : :
4850 : 4 : r1.offset = 100;
4851 : 4 : r1.length = 50;
4852 : :
4853 : 4 : r2.offset = 0;
4854 : 4 : r2.length = 1;
4855 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4856 : :
4857 : 4 : r2.offset = 0;
4858 : 4 : r2.length = 100;
4859 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4860 : :
4861 : 4 : r2.offset = 0;
4862 : 4 : r2.length = 110;
4863 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4864 : :
4865 : 4 : r2.offset = 100;
4866 : 4 : r2.length = 10;
4867 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4868 : :
4869 : 4 : r2.offset = 110;
4870 : 4 : r2.length = 20;
4871 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4872 : :
4873 : 4 : r2.offset = 140;
4874 : 4 : r2.length = 150;
4875 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4876 : :
4877 : 4 : r2.offset = 130;
4878 : 4 : r2.length = 200;
4879 : 4 : CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
4880 : :
4881 : 4 : r2.offset = 150;
4882 : 4 : r2.length = 100;
4883 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4884 : :
4885 : 4 : r2.offset = 110;
4886 : 4 : r2.length = 0;
4887 : 4 : CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
4888 : 4 : }
4889 : :
/* Flags set by the lock/unlock completion callbacks below; each test
 * clears them before issuing the operation and checks them afterwards.
 */
static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;
4892 : :
/* Completion callback for bdev_lock_lba_range(); the status is ignored,
 * the test only records that the lock completed.
 */
static void
lock_lba_range_done(struct lba_range *range, void *ctx, int status)
{
	g_lock_lba_range_done = true;
}
4898 : :
/* Completion callback for bdev_unlock_lba_range(); the status is ignored,
 * the test only records that the unlock completed.
 */
static void
unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}
4904 : :
4905 : : static void
4906 : 4 : lock_lba_range_check_ranges(void)
4907 : : {
4908 : : struct spdk_bdev *bdev;
4909 : 4 : struct spdk_bdev_desc *desc = NULL;
4910 : : struct spdk_io_channel *io_ch;
4911 : : struct spdk_bdev_channel *channel;
4912 : : struct lba_range *range;
4913 : 3 : int ctx1;
4914 : : int rc;
4915 : :
4916 : 4 : ut_init_bdev(NULL);
4917 : 4 : bdev = allocate_bdev("bdev0");
4918 : :
4919 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4920 : 4 : CU_ASSERT(rc == 0);
4921 : 4 : CU_ASSERT(desc != NULL);
4922 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4923 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4924 : 4 : CU_ASSERT(io_ch != NULL);
4925 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
4926 : :
4927 : 4 : g_lock_lba_range_done = false;
4928 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4929 : 4 : CU_ASSERT(rc == 0);
4930 : 4 : poll_threads();
4931 : :
4932 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
4933 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
4934 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
4935 : 4 : CU_ASSERT(range->offset == 20);
4936 : 4 : CU_ASSERT(range->length == 10);
4937 : 4 : CU_ASSERT(range->owner_ch == channel);
4938 : :
4939 : : /* Unlocks must exactly match a lock. */
4940 : 4 : g_unlock_lba_range_done = false;
4941 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
4942 : 4 : CU_ASSERT(rc == -EINVAL);
4943 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == false);
4944 : :
4945 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
4946 : 4 : CU_ASSERT(rc == 0);
4947 : 4 : spdk_delay_us(100);
4948 : 4 : poll_threads();
4949 : :
4950 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
4951 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
4952 : :
4953 : 4 : spdk_put_io_channel(io_ch);
4954 : 4 : spdk_bdev_close(desc);
4955 : 4 : free_bdev(bdev);
4956 : 4 : ut_fini_bdev();
4957 : 4 : }
4958 : :
4959 : : static void
4960 : 4 : lock_lba_range_with_io_outstanding(void)
4961 : : {
4962 : : struct spdk_bdev *bdev;
4963 : 4 : struct spdk_bdev_desc *desc = NULL;
4964 : : struct spdk_io_channel *io_ch;
4965 : : struct spdk_bdev_channel *channel;
4966 : : struct lba_range *range;
4967 : 3 : char buf[4096];
4968 : 3 : int ctx1;
4969 : : int rc;
4970 : :
4971 : 4 : ut_init_bdev(NULL);
4972 : 4 : bdev = allocate_bdev("bdev0");
4973 : :
4974 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
4975 : 4 : CU_ASSERT(rc == 0);
4976 : 4 : CU_ASSERT(desc != NULL);
4977 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
4978 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
4979 : 4 : CU_ASSERT(io_ch != NULL);
4980 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
4981 : :
4982 : 4 : g_io_done = false;
4983 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
4984 : 4 : CU_ASSERT(rc == 0);
4985 : :
4986 : 4 : g_lock_lba_range_done = false;
4987 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
4988 : 4 : CU_ASSERT(rc == 0);
4989 : 4 : poll_threads();
4990 : :
4991 : : /* The lock should immediately become valid, since there are no outstanding
4992 : : * write I/O.
4993 : : */
4994 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
4995 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
4996 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
4997 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
4998 : 4 : CU_ASSERT(range->offset == 20);
4999 : 4 : CU_ASSERT(range->length == 10);
5000 : 4 : CU_ASSERT(range->owner_ch == channel);
5001 : 4 : CU_ASSERT(range->locked_ctx == &ctx1);
5002 : :
5003 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5004 : 4 : CU_ASSERT(rc == 0);
5005 : 4 : stub_complete_io(1);
5006 : 4 : spdk_delay_us(100);
5007 : 4 : poll_threads();
5008 : :
5009 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5010 : :
5011 : : /* Now try again, but with a write I/O. */
5012 : 4 : g_io_done = false;
5013 : 4 : rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
5014 : 4 : CU_ASSERT(rc == 0);
5015 : :
5016 : 4 : g_lock_lba_range_done = false;
5017 : 4 : rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
5018 : 4 : CU_ASSERT(rc == 0);
5019 : 4 : poll_threads();
5020 : :
5021 : : /* The lock should not be fully valid yet, since a write I/O is outstanding.
5022 : : * But note that the range should be on the channel's locked_list, to make sure no
5023 : : * new write I/O are started.
5024 : : */
5025 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5026 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5027 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5028 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5029 : 4 : CU_ASSERT(range->offset == 20);
5030 : 4 : CU_ASSERT(range->length == 10);
5031 : :
5032 : : /* Complete the write I/O. This should make the lock valid (checked by confirming
5033 : : * our callback was invoked).
5034 : : */
5035 : 4 : stub_complete_io(1);
5036 : 4 : spdk_delay_us(100);
5037 : 4 : poll_threads();
5038 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5039 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5040 : :
5041 : 4 : rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
5042 : 4 : CU_ASSERT(rc == 0);
5043 : 4 : poll_threads();
5044 : :
5045 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5046 : :
5047 : 4 : spdk_put_io_channel(io_ch);
5048 : 4 : spdk_bdev_close(desc);
5049 : 4 : free_bdev(bdev);
5050 : 4 : ut_fini_bdev();
5051 : 4 : }
5052 : :
/* Verify that overlapping range locks are queued on
 * bdev->internal.pending_locked_ranges and are granted in order as the
 * conflicting locks are released.
 */
static void
lock_lba_range_overlapped(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	/* Lock range 20-29. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);

	/* Try to lock range 25-39. It should not lock immediately, since it overlaps with
	 * 20-29.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Unlock 20-29. This should result in range 25-39 now getting locked since it
	 * no longer overlaps with an active lock.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 25);
	CU_ASSERT(range->length == 15);

	/* Lock 40-59. This should immediately lock since it does not overlap with the
	 * currently active 25-39 lock.
	 */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	range = TAILQ_NEXT(range, tailq);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 40);
	CU_ASSERT(range->length == 20);

	/* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
	 * the 40-59 lock is still active.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == false);
	range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Unlock 40-59. This should result in 35-44 now getting locked, since there are
	 * no longer any active overlapping locks.
	 */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(g_lock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
	range = TAILQ_FIRST(&bdev->internal.locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 35);
	CU_ASSERT(range->length == 10);

	/* Finally, unlock 35-44. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
5189 : :
/* Completion callback for spdk_bdev_quiesce(); reuses the lock flag,
 * since quiesce is implemented on top of range locking. Status ignored.
 */
static void
bdev_quiesce_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}
5195 : :
/* Completion callback for spdk_bdev_unquiesce(); reuses the unlock flag.
 * Status ignored.
 */
static void
bdev_unquiesce_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}
5201 : :
/* Quiesce completion callback that immediately starts an unquiesce from
 * within the callback itself, to verify this re-entry is allowed.
 * ctx carries the bdev being quiesced.
 */
static void
bdev_quiesce_done_unquiesce(void *ctx, int status)
{
	struct spdk_bdev *bdev = ctx;
	int rc;

	g_lock_lba_range_done = true;

	rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL);
	CU_ASSERT(rc == 0);
}
5213 : :
5214 : : static void
5215 : 4 : bdev_quiesce(void)
5216 : : {
5217 : : struct spdk_bdev *bdev;
5218 : 4 : struct spdk_bdev_desc *desc = NULL;
5219 : : struct spdk_io_channel *io_ch;
5220 : : struct spdk_bdev_channel *channel;
5221 : : struct lba_range *range;
5222 : : struct spdk_bdev_io *bdev_io;
5223 : 3 : int ctx1;
5224 : : int rc;
5225 : :
5226 : 4 : ut_init_bdev(NULL);
5227 : 4 : bdev = allocate_bdev("bdev0");
5228 : :
5229 : 4 : rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
5230 : 4 : CU_ASSERT(rc == 0);
5231 : 4 : CU_ASSERT(desc != NULL);
5232 : 4 : CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
5233 : 4 : io_ch = spdk_bdev_get_io_channel(desc);
5234 : 4 : CU_ASSERT(io_ch != NULL);
5235 : 4 : channel = spdk_io_channel_get_ctx(io_ch);
5236 : :
5237 : 4 : g_lock_lba_range_done = false;
5238 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5239 : 4 : CU_ASSERT(rc == 0);
5240 : 4 : poll_threads();
5241 : :
5242 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5243 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5244 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5245 : 4 : CU_ASSERT(range->offset == 0);
5246 : 4 : CU_ASSERT(range->length == bdev->blockcnt);
5247 : 4 : CU_ASSERT(range->owner_ch == NULL);
5248 : 4 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5249 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5250 : 4 : CU_ASSERT(range->offset == 0);
5251 : 4 : CU_ASSERT(range->length == bdev->blockcnt);
5252 : 4 : CU_ASSERT(range->owner_ch == NULL);
5253 : :
5254 : 4 : g_unlock_lba_range_done = false;
5255 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5256 : 4 : CU_ASSERT(rc == 0);
5257 : 4 : spdk_delay_us(100);
5258 : 4 : poll_threads();
5259 : :
5260 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5261 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5262 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5263 : :
5264 : 4 : g_lock_lba_range_done = false;
5265 : 4 : rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1);
5266 : 4 : CU_ASSERT(rc == 0);
5267 : 4 : poll_threads();
5268 : :
5269 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5270 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5271 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5272 : 4 : CU_ASSERT(range->offset == 20);
5273 : 4 : CU_ASSERT(range->length == 10);
5274 : 4 : CU_ASSERT(range->owner_ch == NULL);
5275 : 4 : range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges);
5276 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5277 : 4 : CU_ASSERT(range->offset == 20);
5278 : 4 : CU_ASSERT(range->length == 10);
5279 : 4 : CU_ASSERT(range->owner_ch == NULL);
5280 : :
5281 : : /* Unlocks must exactly match a lock. */
5282 : 4 : g_unlock_lba_range_done = false;
5283 : 4 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1);
5284 : 4 : CU_ASSERT(rc == -EINVAL);
5285 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == false);
5286 : :
5287 : 4 : rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1);
5288 : 4 : CU_ASSERT(rc == 0);
5289 : 4 : spdk_delay_us(100);
5290 : 4 : poll_threads();
5291 : :
5292 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5293 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5294 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5295 : :
5296 : : /* Test unquiesce from quiesce cb */
5297 : 4 : g_lock_lba_range_done = false;
5298 : 4 : g_unlock_lba_range_done = false;
5299 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev);
5300 : 4 : CU_ASSERT(rc == 0);
5301 : 4 : poll_threads();
5302 : :
5303 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5304 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5305 : :
5306 : : /* Test quiesce with read I/O */
5307 : 4 : g_lock_lba_range_done = false;
5308 : 4 : g_unlock_lba_range_done = false;
5309 : 4 : g_io_done = false;
5310 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5311 : 4 : CU_ASSERT(rc == 0);
5312 : :
5313 : 4 : rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1);
5314 : 4 : CU_ASSERT(rc == 0);
5315 : 4 : poll_threads();
5316 : :
5317 [ - + ]: 4 : CU_ASSERT(g_io_done == false);
5318 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == false);
5319 : 4 : range = TAILQ_FIRST(&channel->locked_ranges);
5320 [ + + ]: 4 : SPDK_CU_ASSERT_FATAL(range != NULL);
5321 : :
5322 : 4 : stub_complete_io(1);
5323 : 4 : spdk_delay_us(100);
5324 : 4 : poll_threads();
5325 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5326 [ - + ]: 4 : CU_ASSERT(g_lock_lba_range_done == true);
5327 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5328 : :
5329 : 4 : g_io_done = false;
5330 : 4 : rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1);
5331 : 4 : CU_ASSERT(rc == 0);
5332 : :
5333 : 4 : bdev_io = TAILQ_FIRST(&channel->io_locked);
5334 [ - + ]: 4 : SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
5335 : 4 : CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20);
5336 : 4 : CU_ASSERT(bdev_io->u.bdev.num_blocks == 1);
5337 : :
5338 : 4 : rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1);
5339 : 4 : CU_ASSERT(rc == 0);
5340 : 4 : spdk_delay_us(100);
5341 : 4 : poll_threads();
5342 : :
5343 [ - + ]: 4 : CU_ASSERT(g_unlock_lba_range_done == true);
5344 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
5345 : 4 : CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges));
5346 : :
5347 : 4 : CU_ASSERT(TAILQ_EMPTY(&channel->io_locked));
5348 : 4 : spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
5349 : 4 : poll_threads();
5350 [ - + ]: 4 : CU_ASSERT(g_io_done == true);
5351 : :
5352 : 4 : spdk_put_io_channel(io_ch);
5353 : 4 : spdk_bdev_close(desc);
5354 : 4 : free_bdev(bdev);
5355 : 4 : ut_fini_bdev();
5356 : 4 : }
5357 : :
5358 : : static void
5359 : 36 : abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
5360 : : {
5361 : 36 : g_abort_done = true;
5362 : 36 : g_abort_status = bdev_io->internal.status;
5363 : 36 : spdk_bdev_free_io(bdev_io);
5364 : 36 : }
5365 : :
/* Exercise spdk_bdev_abort() across the interesting cases:
 *  - module does not support SPDK_BDEV_IO_TYPE_ABORT (-ENOTSUP),
 *  - abort of an I/O that is successfully aborted,
 *  - abort racing with completion of the target I/O,
 *  - abort of split (parent + child) I/O under several split conditions
 *    (optimal_io_boundary, write_unit_size, max_rw_size,
 *    max_segment_size/max_num_segments),
 *  - abort whose child abort requests must wait for free spdk_bdev_io
 *    (the small bdev_io_pool_size forces queuing on io_wait_queue).
 * Relies on the file-level stub channel (g_bdev_ut_channel), the
 * g_io_*/g_abort_* globals set by io_done()/abort_done(), and
 * stub_complete_io() to complete outstanding stub I/O in order.
 */
static void
bdev_io_abort(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	/* Deliberately tiny I/O pool so a later abort of 4 children cannot get
	 * all its spdk_bdev_io at once and must queue on mgmt_ch->io_wait_queue.
	 */
	bdev_opts.bdev_io_pool_size = 7;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	g_abort_done = false;

	/* Abort must be rejected when the module does not support it. */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);

	/* No I/O with ctx io_ctx2 is outstanding, so the abort fails. */
	rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test the case that the target I/O was successfully aborted. */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	/* The aborted read completes immediately with FAILED status. */
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test the case that the target I/O was not aborted because it completed
	 * in the middle of execution of the abort.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	/* Module reports the abort itself as FAILED (target already done). */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First completion is the target read, finishing successfully. */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Second completion is the abort; even though the module failed it,
	 * the caller still sees SUCCESS because the target I/O is gone.
	 */
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Two outstanding: one child abort per child read. */
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split by strip and then
	 * needs to be split is aborted correctly. Abort is requested before the second
	 * child I/O was submitted. The parent I/O should complete with failure without
	 * submitting the second child I/O.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* Only the first child was submitted (iovcnt limit). */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Differently from the above, the child abort request will be submitted
	 * sequentially due to the capacity of spdk_bdev_io.
	 */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	/* Pool exhausted: the remaining child aborts wait for free bdev_ios. */
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(3);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = false;
	bdev->split_on_write_unit = true;
	bdev->write_unit_size = 16;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 16, length 32, payload 0xF000
	 * Child - Offset 16, length 16, payload 0xF000
	 * Child - Offset 32, length 16, payload 0xF000 + 16 * 512
	 *
	 * Use bdev->split_on_write_unit as a split condition.
	 */
	g_io_done = false;

	rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 16, 32, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_write_unit = false;
	bdev->max_rw_size = 16;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Use bdev->max_rw_size as a split condition.
	 */
	g_io_done = false;

	rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->max_rw_size = 0;
	bdev->max_segment_size = 512 * 16;
	bdev->max_num_segments = 1;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Use bdev->max_segment_size and bdev->max_num_segments together as split conditions.
	 *
	 * One single-vector command is changed to one two-vectors command, but
	 * bdev->max_num_segments is 1 and it is split into two single-vector commands.
	 */
	g_io_done = false;

	rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
5644 : :
/* Verify splitting of UNMAP requests against bdev->max_unmap and
 * bdev->max_unmap_segments:
 *  Case 1 - request fits, no split;
 *  Case 2 - request splits into exactly 2 children;
 *  Case 3 - 15 children, submitted in batches bounded by
 *           SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS.
 * Expected child offsets/lengths are queued on the stub channel ahead of
 * submission and matched by stub_submit_request().
 */
static void
bdev_unmap(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_unmap_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	/* Pool sized large enough that splitting is never blocked on bdev_ios. */
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	bdev->max_unmap = 8;
	bdev->max_unmap_segments = 2;
	/* Each child may cover at most max_unmap * max_unmap_segments blocks. */
	max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments;
	num_blocks = max_unmap_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_unmap_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	/* Children are dispatched in batches of at most
	 * SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS; drain batch by batch.
	 */
	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
5741 : :
/* Verify splitting of WRITE_ZEROES requests against bdev->max_write_zeroes.
 * Mirrors bdev_unmap():
 *  Case 1 - request fits, no split;
 *  Case 2 - request splits into exactly 2 children;
 *  Case 3 - 15 children, submitted in batches bounded by
 *           SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS.
 */
static void
bdev_write_zeroes_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	/* Pool sized large enough that splitting is never blocked on bdev_ios. */
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	max_write_zeroes_blocks = 8;
	bdev->max_write_zeroes = max_write_zeroes_blocks;
	num_blocks = max_write_zeroes_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
						   0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_write_zeroes_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks,
						   0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	/* Children are dispatched in batches of at most
	 * SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS; drain batch by batch.
	 */
	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
5839 : :
5840 : : static void
5841 : 4 : bdev_set_options_test(void)
5842 : : {
5843 : 4 : struct spdk_bdev_opts bdev_opts = {};
5844 : : int rc;
5845 : :
5846 : : /* Case1: Do not set opts_size */
5847 : 4 : rc = spdk_bdev_set_opts(&bdev_opts);
5848 : 4 : CU_ASSERT(rc == -1);
5849 : 4 : }
5850 : :
/* Sentinel memory-domain pointer handed out by the stub below. The value is
 * an arbitrary non-NULL marker used only for pointer-equality checks; it is
 * never dereferenced.
 */
static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d;
5852 : :
5853 : : static int
5854 : 12 : test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
5855 : : int array_size)
5856 : : {
5857 [ + + + + ]: 12 : if (array_size > 0 && domains) {
5858 : 4 : domains[0] = g_bdev_memory_domain;
5859 : 1 : }
5860 : :
5861 : 12 : return 1;
5862 : : }
5863 : :
/* Validate spdk_bdev_get_memory_domains() argument handling on a stack-built
 * bdev: NULL bdev is -EINVAL; NULL domains array or zero array_size still
 * returns the count (1) without writing; with room, the stub's sentinel
 * domain is stored; with no get_memory_domains op the count is 0.
 */
static void
bdev_get_memory_domains(void)
{
	struct spdk_bdev_fn_table fn_table = {
		.get_memory_domains = test_bdev_get_supported_dma_device_types_op
	};
	struct spdk_bdev bdev = { .fn_table = &fn_table };
	struct spdk_memory_domain *domains[2] = {};
	int rc;

	/* bdev is NULL */
	rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
	CU_ASSERT(rc == -EINVAL);

	/* domains is NULL */
	rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
	CU_ASSERT(rc == 1);

	/* array size is 0 */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
	CU_ASSERT(rc == 1);

	/* get_supported_dma_device_types op is set */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] == g_bdev_memory_domain);

	/* get_supported_dma_device_types op is not set */
	fn_table.get_memory_domains = NULL;
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 0);
}
5896 : :
/* Shared body for the ext-I/O tests: issue one read and one write through
 * spdk_bdev_readv_blocks_ext()/spdk_bdev_writev_blocks_ext() on a separate-
 * metadata bdev and check completion. When ext_io_opts is non-NULL its
 * metadata pointer must be propagated to the submitted request; when NULL
 * the legacy (no-opts) path is exercised.
 */
static void
_bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev0");
	/* Separate metadata, 8 bytes per block. */
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	if (ext_io_opts) {
		/* The metadata pointer from ext opts must reach the module. */
		expected_io->md_buf = ext_io_opts->metadata;
	}
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	if (ext_io_opts) {
		expected_io->md_buf = ext_io_opts->metadata;
	}
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();

}
5961 : :
5962 : : static void
5963 : 4 : bdev_io_ext(void)
5964 : : {
5965 : 4 : struct spdk_bdev_ext_io_opts ext_io_opts = {
5966 : : .metadata = (void *)0xFF000000,
5967 : : .size = sizeof(ext_io_opts),
5968 : : .dif_check_flags_exclude_mask = 0
5969 : : };
5970 : :
5971 : 4 : _bdev_io_ext(&ext_io_opts);
5972 : 4 : }
5973 : :
/* Run the shared ext-I/O test with ext_io_opts == NULL to cover the
 * legacy (no-opts) submission path.
 */
static void
bdev_io_ext_no_opts(void)
{
	_bdev_io_ext(NULL);
}
5979 : :
/* Verify that spdk_bdev_readv_blocks_ext()/spdk_bdev_writev_blocks_ext()
 * reject a spdk_bdev_ext_io_opts whose size field is invalid: zero, larger
 * than the structure, or too small to contain the metadata member.
 */
static void
bdev_io_ext_invalid_opts(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts),
		.dif_check_flags_exclude_mask = 0
	};
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev0");
	/* Separate metadata, 8 bytes per block. */
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Test invalid ext_opts size: zero. */
	ext_io_opts.size = 0;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);

	/* Size larger than the structure itself. */
	ext_io_opts.size = sizeof(ext_io_opts) * 2;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);

	/* Size one byte short of fully containing the metadata member. */
	ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
			   sizeof(ext_io_opts.metadata) - 1;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6033 : :
/* Verify that an ext-API I/O carrying separate metadata is split on the bdev's
 * optimal_io_boundary and that each child inherits the correct slice of the
 * metadata buffer (parent md_buf advanced by blocks_in_first_child * md_len).
 */
static void
bdev_io_ext_split(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts),
		.dif_check_flags_exclude_mask = 0
	};
	int rc;

	ut_init_bdev(NULL);

	/* Separate (non-interleaved) metadata, 8 bytes per block. */
	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Check that IO request with ext_opts and metadata is split correctly
	 * Offset 14, length 8, payload 0xF000
	 * Child - Offset 14, length 2, payload 0xF000
	 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	iov.iov_base = (void *)0xF000;
	iov.iov_len = 4096;
	/* Reset the opts; only metadata and size matter for this scenario. */
	memset(&ext_io_opts, 0, sizeof(ext_io_opts));
	ext_io_opts.metadata = (void *)0xFF000000;
	ext_io_opts.size = sizeof(ext_io_opts);
	g_io_done = false;

	/* read */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Second child's md_buf is advanced by 2 blocks worth of metadata (2 * md_len). */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* Parent completes only after both children do. */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* write - same split expectations as the read case */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6126 : :
/* Verify bounce-buffer handling for ext-API I/O with a memory domain the bdev
 * does not support: data must be pulled (write path) / pushed (read path)
 * between the caller's domain and an internal buffer, and a -ENOMEM from
 * pull/push must queue the request for retry rather than failing it.
 */
static void
bdev_io_ext_bounce_buffer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io, *aux_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts),
		.dif_check_flags_exclude_mask = 0
	};
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Verify data pull/push
	 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	/* Read completion must push the bounce buffer back to the caller's domain. */
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	/* Write submission must pull data from the caller's domain before the I/O. */
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Verify the request is queued after receiving ENOMEM from pull */
	g_io_done = false;
	aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
	rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	/* The second IO has been queued */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	MOCK_CLEAR(spdk_memory_domain_pull_data);
	g_memory_domain_pull_data_called = false;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	/* The second IO should be submitted now */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	g_io_done = false;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Verify the request is queued after receiving ENOMEM from push */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	MOCK_SET(spdk_memory_domain_push_data, -ENOMEM);
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link);
	rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	stub_complete_io(1);
	/* The IO isn't done yet, it's still waiting on push */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	MOCK_CLEAR(spdk_memory_domain_push_data);
	g_memory_domain_push_data_called = false;
	/* Completing the second IO should also trigger push on the first one */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6258 : :
/* Verify that bdev registration creates a lookup alias from the bdev's UUID:
 * the UUID string resolves via spdk_bdev_get_by_name(), the alias disappears
 * on unregister, a UUID may itself be used as the bdev name, and registering
 * two bdevs with the same UUID fails with -EEXIST.
 */
static void
bdev_register_uuid_alias(void)
{
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure an UUID was generated */
	CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid));

	/* Check that an UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check the same, but this time register the bdev with non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using UUID as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUIDs */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	ut_fini_bdev();
}
6324 : :
/* Verify spdk_bdev_unregister_by_name(): fails with -ENODEV for an unknown
 * name or a non-matching module, succeeds for a matching name/module pair,
 * and invokes the completion callback asynchronously (only after polling).
 */
static void
bdev_unregister_by_name(void)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	/* Sentinel values so we can detect when/if the callbacks fire. */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Unknown bdev name -> -ENODEV. */
	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	/* Known name but wrong owning module -> -ENODEV. */
	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}
6360 : :
/* spdk_for_each_bdev() callback: bump the counter passed via ctx.
 * Always returns 0 so iteration continues over every bdev.
 */
static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *num_seen = ctx;

	(void)bdev;	/* only the count matters, not the bdev itself */
	++(*num_seen);

	return 0;
}
6370 : :
/* Verify spdk_for_each_bdev() and spdk_for_each_bdev_leaf():
 * - a bdev in SPDK_BDEV_STATUS_REMOVING is skipped by both iterators;
 * - the leaf variant additionally skips claimed bdevs.
 * With 8 bdevs (1 removing, 3 claimed): for_each sees 7, for_each_leaf sees 4.
 */
static void
for_each_bdev_test(void)
{
	struct spdk_bdev *bdev[8];
	int rc, count;

	/* bdev0 is being removed, so iterators must not visit it. */
	bdev[0] = allocate_bdev("bdev0");
	bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING;

	/* bdev1, bdev3, bdev5 are claimed - excluded from the "leaf" walk. */
	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_bdev("bdev4");

	bdev[5] = allocate_bdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_bdev("bdev6");

	bdev[7] = allocate_bdev("bdev7");

	/* All bdevs except the removing one: 8 - 1 = 7. */
	count = 0;
	rc = spdk_for_each_bdev(&count, count_bdevs);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 7);

	/* Leaves only: 8 - 1 removing - 3 claimed = 4. */
	count = 0;
	rc = spdk_for_each_bdev_leaf(&count, count_bdevs);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 4);

	/* Restore READY status so free_bdev() can tear bdev0 down normally. */
	bdev[0]->internal.status = SPDK_BDEV_STATUS_READY;
	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
	free_bdev(bdev[4]);
	free_bdev(bdev[5]);
	free_bdev(bdev[6]);
	free_bdev(bdev[7]);
}
6420 : :
/* Verify spdk_bdev_seek_data()/spdk_bdev_seek_hole():
 * - when the bdev does not support the seek I/O type, the call still succeeds
 *   and completes without submitting an I/O (offset 0 for data, UINT64_MAX for
 *   hole, per the emulated fallback);
 * - when supported, the I/O is submitted and the stub's offset is reported.
 */
static void
bdev_seek_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	int rc;

	ut_init_bdev(NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Seek data not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false);
	rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	/* No I/O reaches the module; completion arrives via poll_threads(). */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	poll_threads();
	CU_ASSERT(g_seek_offset == 0);

	/* Seek hole not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false);
	rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	poll_threads();
	CU_ASSERT(g_seek_offset == UINT64_MAX);

	/* Seek data supported */
	g_seek_data_offset = 12345;
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true);
	rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_seek_offset == 12345);

	/* Seek hole supported */
	g_seek_hole_offset = 67890;
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true);
	rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_seek_offset == 67890);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6483 : :
/* Verify spdk_bdev_copy_blocks():
 * - a bdev with native COPY support gets a single COPY request;
 * - without COPY support, the copy is emulated as a READ followed by a WRITE.
 */
static void
bdev_copy(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t src_offset, num_blocks;
	uint32_t num_completed;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports copy, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 512;
	num_blocks = 128;
	/* Copy from the tail of the bdev to offset 0 - ranges don't overlap. */
	src_offset = bdev->blockcnt - num_blocks;

	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if copy is not supported it'll still work */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	/* Emulated copy: the READ completes first, then the WRITE is issued. */
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Restore COPY support for subsequent tests. */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6543 : :
/* Verify splitting of spdk_bdev_copy_blocks() requests:
 * 1. no split when the request fits within max_copy;
 * 2. split into 2 children when it is 2 * max_copy blocks;
 * 3. split into 15 children, throttled to SPDK_BDEV_MAX_CHILDREN_COPY_REQS
 *    outstanding at a time;
 * 4. same as case 2 but with COPY unsupported, so each child becomes a
 *    READ + WRITE pair.
 */
static void
bdev_copy_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
	int rc;

	/* Enlarge the I/O pool so many child I/Os can be in flight at once. */
	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	max_copy_blocks = 8;
	bdev->max_copy = max_copy_blocks;
	num_children = 2;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	/* Each child copies max_copy_blocks from src_offset+offset to offset. */
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
	stub_complete_io(num_children);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	/* At most SPDK_BDEV_MAX_CHILDREN_COPY_REQS children are outstanding at once. */
	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	/* Case 4: Same test scenario as the case 2 but the configuration is different.
	 * Copy is not supported.
	 */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	num_children = 2;
	max_copy_blocks = spdk_bdev_get_max_copy(bdev);
	num_blocks = max_copy_blocks * num_children;
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		src_offset += max_copy_blocks;
	}
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	/* Reset the offsets consumed while building the expectations above. */
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);

		/* One copy request is split into one read and one write requests. */
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);

		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
6695 : :
6696 : : static void
6697 : 4 : examine_claim_v1(struct spdk_bdev *bdev)
6698 : : {
6699 : : int rc;
6700 : :
6701 : 4 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
6702 : 4 : CU_ASSERT(rc == 0);
6703 : 4 : }
6704 : :
6705 : : static void
6706 : 16 : examine_no_lock_held(struct spdk_bdev *bdev)
6707 : : {
6708 : 16 : CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
6709 : 16 : CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
6710 : 16 : }
6711 : :
/* Context passed (via bdev->ctxt) to examine_claim_v2(). */
struct examine_claim_v2_ctx {
	struct ut_examine_ctx examine_ctx;	/* base examine context; must be first */
	enum spdk_bdev_claim_type claim_type;	/* v2 claim type to request during examine */
	struct spdk_bdev_desc *desc;		/* descriptor opened by examine_claim_v2(); closed by the test */
};
6717 : :
6718 : : static void
6719 : 4 : examine_claim_v2(struct spdk_bdev *bdev)
6720 : : {
6721 : 4 : struct examine_claim_v2_ctx *ctx = bdev->ctxt;
6722 : : int rc;
6723 : :
6724 : 4 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
6725 : 4 : CU_ASSERT(rc == 0);
6726 : :
6727 : 4 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
6728 : 4 : CU_ASSERT(rc == 0);
6729 : 4 : }
6730 : :
/* Verify that no spinlocks are held while examine_config()/examine_disk() run,
 * across all three claim scenarios: no claim, a v1 claim taken during
 * examine_config(), and a v2 claim taken during examine_config().
 */
static void
examine_locks(void)
{
	struct spdk_bdev *bdev;
	struct ut_examine_ctx ctx = { 0 };
	struct examine_claim_v2_ctx v2_ctx;

	/* Without any claims, one code path is taken */
	ctx.examine_config = examine_no_lock_held;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise another path that is taken when examine_config() takes a v1 claim. */
	memset(&ctx, 0, sizeof(ctx));
	ctx.examine_config = examine_claim_v1;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	/* v1 module claim is recorded as an exclusive-write claim. */
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
	spdk_bdev_module_release_bdev(bdev);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise the final path that comes with v2 claims. */
	memset(&v2_ctx, 0, sizeof(v2_ctx));
	v2_ctx.examine_ctx.examine_config = examine_claim_v2;
	v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
	v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
	CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
	CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	/* Closing the descriptor opened by examine_claim_v2() drops the claim. */
	spdk_bdev_close(v2_ctx.desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	free_bdev(bdev);
}
6775 : :
/* Assert that the bdev's v2 claim list holds exactly 'expect' claims,
 * by walking internal.claim.v2.claims and counting the entries. */
#define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
	do { \
		uint32_t len = 0; \
		struct spdk_bdev_module_claim *claim; \
		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \
			len++; \
		} \
		CU_ASSERT(len == expect); \
	} while (0)
6785 : :
/*
 * Test SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE (RWO) v2 claims: one writer holds
 * the claim, new readers may still open, all competing claims (v2 of any type
 * and v1) fail, the claim dies with its descriptor, and a shared key is
 * rejected for this claim type.
 */
static void
claim_v2_rwo(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	/* With NULL opts the claim name defaults to the empty string. */
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	/* The claim keeps its own copy of the name: clobbering opts must not change it. */
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Closing the first descriptor now allows a new claim and it is promoted to rw. */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->claim != NULL);
	/* Taking an RWO claim upgrades a read-only descriptor to read-write. */
	CU_ASSERT(desc2->write);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}
6912 : :
/*
 * Test SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE (ROM) v2 claims: claims require a
 * read-only descriptor, multiple ROM claims coexist, writers and other claim
 * types are rejected, and shared keys are not allowed.
 */
static void
claim_v2_rom(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	/* NOTE(review): duplicate of the assertion two lines up; harmless. */
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	/* The claim keeps its own copy of the name: clobbering opts must not change it. */
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!desc2->write);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Cannot claim with a read-write descriptor */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}
7052 : :
/*
 * Test SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED (RWM) v2 claims: a shared claim
 * key is mandatory, claimants with a matching key share write access (and have
 * their descriptors promoted to read-write), while mismatched keys and other
 * claim types are rejected.
 */
static void
claim_v2_rwm(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	/* Only the addresses are used as (mis)matching shared claim keys. */
	char good_key, bad_key;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options should fail */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(desc->claim == NULL);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	opts.shared_claim_key = (uint64_t)&good_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	/* The claim keeps its own copy of the name: clobbering opts must not change it. */
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed if the key does not match */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&bad_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&good_key;
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->write);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim without a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}
7173 : :
/*
 * With two plain read-write descriptors open (no claims), no v2 claim of any
 * type can be taken on either descriptor: RWO/RWM fail with -EPERM because of
 * the other writer, ROM fails with -EINVAL because the descriptor is writable.
 */
static void
claim_v2_existing_writer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc2 != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		/* RWM claims require a key; any address works here. */
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			/* ROM claims are invalid on a read-write descriptor. */
			CU_ASSERT(rc == -EINVAL);
		} else {
			/* RWO/RWM claims are blocked by the other open writer. */
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
		rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	}

	spdk_bdev_close(desc);
	spdk_bdev_close(desc2);

	/* Clean up */
	free_bdev(bdev);
}
7229 : :
/* A legacy (v1) exclusive-write claim blocks every type of v2 claim. */
static void
claim_v2_existing_v1(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Take the legacy v1 claim first. */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		/* RWM claims require a key; any address works here. */
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	}

	spdk_bdev_module_release_bdev(bdev);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}
7273 : :
7274 : : static void
7275 : 4 : claim_v1_existing_v2(void)
7276 : : {
7277 : : struct spdk_bdev *bdev;
7278 : 3 : struct spdk_bdev_desc *desc;
7279 : 3 : struct spdk_bdev_claim_opts opts;
7280 : : enum spdk_bdev_claim_type type;
7281 : 4 : enum spdk_bdev_claim_type types[] = {
7282 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
7283 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
7284 : : SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
7285 : : };
7286 : : size_t i;
7287 : : int rc;
7288 : :
7289 : 4 : bdev = allocate_bdev("bdev0");
7290 : :
7291 [ + + ]: 16 : for (i = 0; i < SPDK_COUNTOF(types); i++) {
7292 : 12 : type = types[i];
7293 : :
7294 : 12 : desc = NULL;
7295 : 12 : rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
7296 : 12 : CU_ASSERT(rc == 0);
7297 [ + + ]: 12 : SPDK_CU_ASSERT_FATAL(desc != NULL);
7298 : :
7299 : : /* Get a v2 claim */
7300 : 12 : spdk_bdev_claim_opts_init(&opts, sizeof(opts));
7301 [ + + ]: 12 : if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
7302 : 4 : opts.shared_claim_key = (uint64_t)&opts;
7303 : 1 : }
7304 : 12 : rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
7305 : 12 : CU_ASSERT(rc == 0);
7306 : :
7307 : : /* Fail to get a v1 claim */
7308 : 12 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7309 : 12 : CU_ASSERT(rc == -EPERM);
7310 : :
7311 : 12 : spdk_bdev_close(desc);
7312 : :
7313 : : /* Now v1 succeeds */
7314 : 12 : rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
7315 : 12 : CU_ASSERT(rc == 0)
7316 : 12 : spdk_bdev_module_release_bdev(bdev);
7317 : 3 : }
7318 : :
7319 : : /* Clean up */
7320 : 4 : free_bdev(bdev);
7321 : 4 : }
7322 : :
7323 : : static int ut_examine_claimed_init0(void);
7324 : : static int ut_examine_claimed_init1(void);
7325 : : static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
7326 : : static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
7327 : : static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
7328 : : static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);
7329 : :
/* Number of vbdev test modules participating in the examine_claimed tests. */
#define UT_MAX_EXAMINE_MODS 2
/* Two modules whose examine callbacks can take claims during examine. */
struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
	{
		.name = "vbdev_ut_examine0",
		.module_init = ut_examine_claimed_init0,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config0,
		.examine_disk = ut_examine_claimed_disk0,
	},
	{
		.name = "vbdev_ut_examine1",
		.module_init = ut_examine_claimed_init1,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config1,
		.examine_disk = ut_examine_claimed_disk1,
	}
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])

/* Per-module bookkeeping for the examine_claimed tests. */
struct ut_examine_claimed_ctx {
	uint32_t examine_config_count;
	uint32_t examine_disk_count;

	/* Claim type to take, with these options */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_claim_opts claim_opts;

	/* Expected return value from spdk_bdev_module_claim_bdev_desc() */
	int expect_claim_err;

	/* Descriptor used for a claim */
	struct spdk_bdev_desc *desc;
} examine_claimed_ctx[UT_MAX_EXAMINE_MODS];

/* When false, the examine callbacks below complete immediately and do nothing. */
bool ut_testing_examine_claimed;
7367 : :
7368 : : /*
7369 : : * Store the order in which the modules were initialized,
7370 : : * since we have no guarantee on the order of execution of the constructors.
7371 : : * Modules are examined in reverse order of their initialization.
7372 : : */
7373 : : static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];
7374 : : static int
7375 : 344 : ut_examine_claimed_init(uint32_t modnum)
7376 : : {
7377 : : static int current = UT_MAX_EXAMINE_MODS;
7378 : :
7379 : : /* Only do this for the first initialization of the bdev framework */
7380 [ + + ]: 344 : if (current == 0) {
7381 : 336 : return 0;
7382 : : }
7383 : 8 : g_ut_examine_claimed_order[modnum] = --current;
7384 : :
7385 : 8 : return 0;
7386 : 86 : }
7387 : :
/* module_init hook for examine_claimed_mods[0]. */
static int
ut_examine_claimed_init0(void)
{
	return ut_examine_claimed_init(0);
}
7393 : :
/* module_init hook for examine_claimed_mods[1]. */
static int
ut_examine_claimed_init1(void)
{
	return ut_examine_claimed_init(1);
}
7399 : :
7400 : : static void
7401 : 32 : reset_examine_claimed_ctx(void)
7402 : : {
7403 : : struct ut_examine_claimed_ctx *ctx;
7404 : : uint32_t i;
7405 : :
7406 [ + + ]: 96 : for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
7407 : 64 : ctx = &examine_claimed_ctx[i];
7408 [ + + ]: 64 : if (ctx->desc != NULL) {
7409 : 40 : spdk_bdev_close(ctx->desc);
7410 : 10 : }
7411 [ - + ]: 64 : memset(ctx, 0, sizeof(*ctx));
7412 : 64 : spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
7413 : 16 : }
7414 : 32 : }
7415 : :
7416 : : static void
7417 : 664 : examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
7418 : : {
7419 [ + + ]: 664 : SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
7420 : 664 : struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
7421 : 664 : struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
7422 : : int rc;
7423 : :
7424 [ + + + + ]: 664 : if (!ut_testing_examine_claimed) {
7425 : 616 : spdk_bdev_module_examine_done(module);
7426 : 616 : return;
7427 : : }
7428 : :
7429 : 48 : ctx->examine_config_count++;
7430 : :
7431 [ + + ]: 48 : if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
7432 : 50 : rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
7433 : 10 : &ctx->desc);
7434 : 40 : CU_ASSERT(rc == 0);
7435 : :
7436 : 40 : rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
7437 : 40 : CU_ASSERT(rc == ctx->expect_claim_err);
7438 : 10 : }
7439 : 48 : spdk_bdev_module_examine_done(module);
7440 : 166 : }
7441 : :
/*
 * examine_config hook for examine_claimed_mods[0]; maps the module to its
 * context slot via the recorded initialization order.
 */
static void
ut_examine_claimed_config0(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
}
7447 : :
/*
 * examine_config hook for examine_claimed_mods[1]; maps the module to its
 * context slot via the recorded initialization order.
 */
static void
ut_examine_claimed_config1(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
}
7453 : :
/*
 * examine_disk callback body shared by both test modules. Only counts the
 * invocation; the claim itself is taken in the examine_config phase.
 */
static void
examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_disk_count++;

	spdk_bdev_module_examine_done(module);
}
7470 : :
/*
 * examine_disk hook for examine_claimed_mods[0]. Uses the literal module
 * index (unlike the config hooks) because examine_disk only runs for the
 * module that holds the claim, whose index already matches its context slot.
 */
static void
ut_examine_claimed_disk0(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 0);
}
7476 : :
/* examine_disk hook for examine_claimed_mods[1]; see ut_examine_claimed_disk0. */
static void
ut_examine_claimed_disk1(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 1);
}
7482 : :
/* Set by ut_examine_done_cb when spdk_bdev_wait_for_examine() completes. */
static bool g_examine_done = false;

/* Completion callback for spdk_bdev_wait_for_examine(). */
static void
ut_examine_done_cb(void *ctx)
{
	g_examine_done = true;
}
7490 : :
7491 : : static void
7492 : 8 : examine_claimed_common(bool autoexamine)
7493 : : {
7494 : : struct spdk_bdev *bdev;
7495 : 8 : struct spdk_bdev_module *mod = examine_claimed_mods;
7496 : 8 : struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;
7497 : 8 : struct spdk_bdev_opts bdev_opts = {};
7498 : : int rc;
7499 : :
7500 : 8 : spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
7501 : 8 : bdev_opts.bdev_auto_examine = autoexamine;
7502 : 8 : ut_init_bdev(&bdev_opts);
7503 : :
7504 : 8 : ut_testing_examine_claimed = true;
7505 : 8 : reset_examine_claimed_ctx();
7506 : :
7507 : : /*
7508 : : * With one module claiming, both modules' examine_config should be called, but only the
7509 : : * claiming module's examine_disk should be called.
7510 : : */
7511 : 8 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7512 : 8 : g_examine_done = false;
7513 : 8 : bdev = allocate_bdev("bdev0");
7514 : :
7515 [ + + ]: 8 : if (!autoexamine) {
7516 : 4 : rc = spdk_bdev_examine("bdev0");
7517 : 4 : CU_ASSERT(rc == 0);
7518 : 4 : rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL);
7519 : 4 : CU_ASSERT(rc == 0);
7520 [ - + ]: 4 : CU_ASSERT(!g_examine_done);
7521 : 4 : poll_threads();
7522 [ - + ]: 4 : CU_ASSERT(g_examine_done);
7523 : 1 : }
7524 : :
7525 : 8 : CU_ASSERT(ctx[0].examine_config_count == 1);
7526 : 8 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7527 [ + + ]: 8 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7528 : 8 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7529 : 8 : CU_ASSERT(ctx[1].examine_config_count == 1);
7530 : 8 : CU_ASSERT(ctx[1].examine_disk_count == 0);
7531 : 8 : CU_ASSERT(ctx[1].desc == NULL);
7532 : 8 : reset_examine_claimed_ctx();
7533 : 8 : free_bdev(bdev);
7534 : :
7535 : : /*
7536 : : * With two modules claiming, both modules' examine_config and examine_disk should be
7537 : : * called.
7538 : : */
7539 : 8 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7540 : 8 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7541 : 8 : g_examine_done = false;
7542 : 8 : bdev = allocate_bdev("bdev0");
7543 : :
7544 [ + + ]: 8 : if (!autoexamine) {
7545 : 4 : rc = spdk_bdev_examine("bdev0");
7546 : 4 : CU_ASSERT(rc == 0);
7547 : 4 : rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL);
7548 : 4 : CU_ASSERT(rc == 0);
7549 [ - + ]: 4 : CU_ASSERT(!g_examine_done);
7550 : 4 : poll_threads();
7551 [ - + ]: 4 : CU_ASSERT(g_examine_done);
7552 : 1 : }
7553 : :
7554 : 8 : CU_ASSERT(ctx[0].examine_config_count == 1);
7555 : 8 : CU_ASSERT(ctx[0].examine_disk_count == 1);
7556 [ + + ]: 8 : SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
7557 : 8 : CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
7558 : 8 : CU_ASSERT(ctx[1].examine_config_count == 1);
7559 : 8 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7560 [ + + ]: 8 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7561 : 8 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7562 : 8 : reset_examine_claimed_ctx();
7563 : 8 : free_bdev(bdev);
7564 : :
7565 : : /*
7566 : : * If two vbdev modules try to claim with conflicting claim types, the module that was added
7567 : : * last wins. The winner gets the claim and is the only one that has its examine_disk
7568 : : * callback invoked.
7569 : : */
7570 : 8 : ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
7571 : 8 : ctx[0].expect_claim_err = -EPERM;
7572 : 8 : ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
7573 : 8 : g_examine_done = false;
7574 : 8 : bdev = allocate_bdev("bdev0");
7575 : :
7576 [ + + ]: 8 : if (!autoexamine) {
7577 : 4 : rc = spdk_bdev_examine("bdev0");
7578 : 4 : CU_ASSERT(rc == 0);
7579 : 4 : rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL);
7580 : 4 : CU_ASSERT(rc == 0);
7581 [ - + ]: 4 : CU_ASSERT(!g_examine_done);
7582 : 4 : poll_threads();
7583 [ - + ]: 4 : CU_ASSERT(g_examine_done);
7584 : 1 : }
7585 : :
7586 : 8 : CU_ASSERT(ctx[0].examine_config_count == 1);
7587 : 8 : CU_ASSERT(ctx[0].examine_disk_count == 0);
7588 : 8 : CU_ASSERT(ctx[1].examine_config_count == 1);
7589 : 8 : CU_ASSERT(ctx[1].examine_disk_count == 1);
7590 [ - + ]: 8 : SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
7591 : 8 : CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
7592 : 8 : CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
7593 : 8 : reset_examine_claimed_ctx();
7594 : 8 : free_bdev(bdev);
7595 : :
7596 : 8 : ut_testing_examine_claimed = false;
7597 : :
7598 : 8 : ut_fini_bdev();
7599 : 8 : }
7600 : :
/* examine_claimed_common() with bdev auto-examine enabled. */
static void
examine_claimed(void)
{
	examine_claimed_common(true);
}
7606 : :
/* Run the shared examine-claimed scenario with auto-examine disabled; the test
 * must trigger examination explicitly via spdk_bdev_examine(). */
static void
examine_claimed_manual(void)
{
	examine_claimed_common(false);
}
7612 : :
7613 : : static void
7614 : 4 : get_numa_id(void)
7615 : : {
7616 : 4 : struct spdk_bdev bdev = {};
7617 : :
7618 : 4 : bdev.numa.id = 0;
7619 : 4 : bdev.numa.id_valid = 0;
7620 : 4 : CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);
7621 : :
7622 : 4 : bdev.numa.id_valid = 1;
7623 : 4 : CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == 0);
7624 : :
7625 : 4 : bdev.numa.id = SPDK_ENV_NUMA_ID_ANY;
7626 : 4 : CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);
7627 : 4 : }
7628 : :
/* Completion callback for spdk_bdev_get_device_stat(): signals the waiting
 * caller by setting the bool it passed as cb_arg. */
static void
get_device_stat_with_reset_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg,
			      int rc)
{
	bool *done = cb_arg;

	*done = true;
}
7635 : :
7636 : : static void
7637 : 16 : get_device_stat_with_given_reset(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
7638 : : enum spdk_bdev_reset_stat_mode mode)
7639 : : {
7640 : 16 : bool done = false;
7641 : :
7642 : 16 : spdk_bdev_get_device_stat(bdev, stat, mode, get_device_stat_with_reset_cb, &done);
7643 [ + + + + ]: 32 : while (!done) { poll_threads(); }
7644 : 16 : }
7645 : :
/* End-to-end check of spdk_bdev_get_device_stat() reset modes: complete one
 * 4096-byte read with 10us of simulated latency, then verify which counters
 * survive STAT_NONE, STAT_MAXMIN, and STAT_ALL requests. */
static void
get_device_stat_with_reset(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct spdk_bdev_io_stat *stat;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;
	ut_init_bdev(&bdev_opts);
	bdev = allocate_bdev("bdev0");

	CU_ASSERT(spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Complete a single read; the 10us delay before completion becomes the
	 * max_read_latency_ticks value asserted below. */
	g_io_done = false;
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	spdk_delay_us(10);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	SPDK_CU_ASSERT_FATAL(stat != NULL);

	/* Get stat without resetting and check that it is correct */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE);
	CU_ASSERT(stat->bytes_read == 4096);
	CU_ASSERT(stat->max_read_latency_ticks == 10);

	/**
	 * Check that the stats were not reset by the previous step, then
	 * send a get request that resets only the max/min stats.
	 */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_MAXMIN);
	CU_ASSERT(stat->bytes_read == 4096);
	CU_ASSERT(stat->max_read_latency_ticks == 10);

	/**
	 * Check that the max/min stats were reset by the previous step, then
	 * send a get request that resets all stats.
	 */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_ALL);
	CU_ASSERT(stat->bytes_read == 4096);
	CU_ASSERT(stat->max_read_latency_ticks == 0);

	/* Check that all stats were reset by the previous step */
	get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE);
	CU_ASSERT(stat->bytes_read == 0);
	CU_ASSERT(stat->max_read_latency_ticks == 0);

	free(stat);
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
7708 : :
/* Test runner entry point: registers every bdev unit test with CUnit, sets up
 * the single-core/single-thread ut_multithread environment, runs the suite,
 * and returns the number of failures as the process exit code.
 * NOTE: CU_ADD_TEST registration order is the execution order — do not reorder. */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext_test);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_quiesce);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);
	CU_ADD_TEST(suite, bdev_copy);
	CU_ADD_TEST(suite, bdev_copy_split_test);
	CU_ADD_TEST(suite, examine_locks);
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);
	CU_ADD_TEST(suite, examine_claimed_manual);
	CU_ADD_TEST(suite, get_numa_id);
	CU_ADD_TEST(suite, get_device_stat_with_reset);

	/* All tests run on a single simulated core/thread (ut_multithread). */
	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}
|